summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn Mainzer <mainzer@hdfgroup.org>2007-11-21 19:03:21 (GMT)
committerJohn Mainzer <mainzer@hdfgroup.org>2007-11-21 19:03:21 (GMT)
commitabf0f803056afc8362f5edc7cde7fb207486437d (patch)
tree0a2c5f8b22f87184afcbc23d6fafa3d5fa8bfeb9
parent78e8f7638a7cc69010f00dc8073361d498b16e52 (diff)
downloadhdf5-abf0f803056afc8362f5edc7cde7fb207486437d.zip
hdf5-abf0f803056afc8362f5edc7cde7fb207486437d.tar.gz
hdf5-abf0f803056afc8362f5edc7cde7fb207486437d.tar.bz2
[svn-r14282] Continuation of initial checkin of changes to metadata journaling
branch -- this commit is needed as I forgot to svn add the new files created in support of metadata journaling. Again, this version may not compile.
-rw-r--r--src/H5AC2.c4790
-rw-r--r--src/H5AC2pkg.h335
-rw-r--r--src/H5AC2private.h315
-rw-r--r--src/H5AC2public.h393
-rw-r--r--src/H5C2.c11024
-rw-r--r--src/H5C2pkg.h949
-rw-r--r--src/H5C2private.h1350
-rw-r--r--src/H5C2public.h55
-rw-r--r--test/cache2.c26769
-rw-r--r--test/cache2_api.c3791
-rw-r--r--test/cache2_common.c5199
-rw-r--r--test/cache2_common.h823
-rw-r--r--testpar/t_cache2.c6108
13 files changed, 61901 insertions, 0 deletions
diff --git a/src/H5AC2.c b/src/H5AC2.c
new file mode 100644
index 0000000..a053b36
--- /dev/null
+++ b/src/H5AC2.c
@@ -0,0 +1,4790 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5AC2.c
+ * Jul 9 1997
+ * Robb Matzke <matzke@llnl.gov>
+ *
+ * Purpose:	Functions in this file implement a cache for
+ *		things which exist on disk. All "things" associated
+ *		with a particular HDF file share the same cache; each
+ *		HDF file has its own cache.
+ *
+ * Modifications:
+ *
+ * Robb Matzke, 4 Aug 1997
+ * Added calls to H5E.
+ *
+ * Quincey Koziol, 22 Apr 2000
+ * Turned on "H5AC2_SORT_BY_ADDR"
+ *
+ * John Mainzer, 5/19/04
+ * Complete redesign and rewrite. See the header comments for
+ * H5AC2_t for an overview of what is going on.
+ *
+ * John Mainzer, 6/4/04
+ * Factored the new cache code into a separate file (H5C.c) to
+ * facilitate re-use. Re-worked this file again to use H5C.
+ *
+ * John Mainzer, 10/18/07
+ * Copied H5AC.c to H5AC2.c and reworked to use H5C2 instead of H5C.
+ * All this is in support of cache API modifications needed for
+ * journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define H5C2_PACKAGE /*suppress error about including H5C2pkg */
+#define H5AC2_PACKAGE /*suppress error about including H5AC2pkg */
+#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+
+/* Interface initialization */
+#define H5_INTERFACE_INIT_FUNC H5AC2_init_interface
+
+#ifdef H5_HAVE_PARALLEL
+#include <mpi.h>
+#endif /* H5_HAVE_PARALLEL */
+
+#include "H5private.h" /* Generic Functions */
+#include "H5AC2pkg.h" /* Metadata cache */
+#include "H5C2pkg.h" /* Cache */
+#include "H5Dprivate.h" /* Dataset functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Pprivate.h" /* Property lists */
+
+
+#ifdef H5_HAVE_PARALLEL
+
+/* Declare a free list to manage the H5AC2_aux_t struct */
+H5FL_DEFINE_STATIC(H5AC2_aux_t);
+
+#endif /* H5_HAVE_PARALLEL */
+
+/****************************************************************************
+ *
+ * structure H5AC2_slist_entry_t
+ *
+ * The dirty entry list maintained via the d_slist_ptr field of H5AC2_aux_t
+ * and the cleaned entry list maintained via the c_slist_ptr field of
+ * H5AC2_aux_t are just lists of the file offsets of the dirty/cleaned
+ * entries. Unfortunately, the slist code makes us define a dynamically
+ * allocated structure to store these offsets in. This structure serves
+ * that purpose. Its fields are as follows:
+ *
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC. This field is used to
+ * validate pointers to instances of H5AC2_slist_entry_t.
+ *
+ * addr: file offset of a metadata entry. Entries are added to this
+ * list (if they aren't there already) when they are marked
+ * dirty in an unprotect, inserted, or renamed. They are
+ * removed when they appear in a clean entries broadcast.
+ *
+ ****************************************************************************/
+
+#ifdef H5_HAVE_PARALLEL
+
+#define H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC 0x00D0A02
+
+typedef struct H5AC2_slist_entry_t
+{
+ uint32_t magic; /* always H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC -- pointer sanity check */
+
+ haddr_t addr; /* file offset of the dirty/cleaned metadata entry */
+} H5AC2_slist_entry_t;
+
+/* Declare a free list to manage the H5AC2_slist_entry_t struct */
+H5FL_DEFINE_STATIC(H5AC2_slist_entry_t);
+
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*
+ * Private file-scope variables.
+ */
+
+/* Default dataset transfer property list for metadata I/O calls */
+/* (Collective set, "block before metadata write" set and "library internal" set) */
+/* (Global variable definition, declaration is in H5AC2private.h also) */
+hid_t H5AC2_dxpl_id=(-1);
+
+/* Private dataset transfer property list for metadata I/O calls */
+/* (Collective set and "library internal" set) */
+/* (Static variable definition) */
+static hid_t H5AC2_noblock_dxpl_id=(-1);
+
+/* Dataset transfer property list for independent metadata I/O calls */
+/* (just "library internal" set - i.e. independent transfer mode) */
+/* (Global variable definition, declaration is in H5AC2private.h also) */
+hid_t H5AC2_ind_dxpl_id=(-1);
+
+
+/*
+ * Private file-scope function declarations:
+ */
+
+static herr_t H5AC2_check_if_write_permitted(const H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t * write_permitted_ptr);
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t H5AC2_broadcast_clean_list(H5AC2_t * cache_ptr);
+#endif /* H5_HAVE_PARALLEL */
+
+static herr_t H5AC2_ext_config_2_int_config(
+ H5AC2_cache_config_t * ext_conf_ptr,
+ H5C2_auto_size_ctl_t * int_conf_ptr);
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t H5AC2_log_deleted_entry(H5AC2_t * cache_ptr,
+ H5AC2_info_t * entry_ptr,
+ haddr_t addr,
+ unsigned int flags);
+
+static herr_t H5AC2_log_dirtied_entry(H5AC2_t * cache_ptr,
+ H5C2_cache_entry_t * entry_ptr,
+ haddr_t addr,
+ hbool_t size_changed,
+ size_t new_size);
+
+static herr_t H5AC2_log_flushed_entry(H5C2_t * cache_ptr,
+ haddr_t addr,
+ hbool_t was_dirty,
+ unsigned flags,
+ int type_id);
+
+#if 0 /* this is useful debugging code -- JRM */
+static herr_t H5AC2_log_flushed_entry_dummy(H5C2_t * cache_ptr,
+ haddr_t addr,
+ hbool_t was_dirty,
+ unsigned flags,
+ int type_id);
+#endif /* JRM */
+
+static herr_t H5AC2_log_inserted_entry(H5F_t * f,
+ H5AC2_t * cache_ptr,
+ H5AC2_info_t * entry_ptr,
+ const H5AC2_class_t * type,
+ haddr_t addr,
+ size_t size);
+
+static herr_t H5AC2_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
+ hid_t dxpl_id,
+ H5AC2_t * cache_ptr,
+ hbool_t do_barrier);
+
+static herr_t H5AC2_receive_and_apply_clean_list(H5F_t * f,
+ hid_t dxpl_id,
+ H5AC2_t * cache_ptr);
+
+static herr_t H5AC2_log_renamed_entry(H5AC2_t * cache_ptr,
+ haddr_t old_addr,
+ haddr_t new_addr);
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_init
+ *
+ * Purpose: Initialize the interface from some other layer.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 18, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_init(void)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_init, FAIL)
+ /* FUNC_ENTER() does all the work -- it invokes H5AC2_init_interface()
+ * on first entry via the H5_INTERFACE_INIT_FUNC macro defined above.
+ */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_init_interface
+ *
+ * Purpose: Initialize interface-specific information.
+ *
+ * In the parallel build, creates three dataset transfer
+ * property lists used for metadata I/O: a blocking
+ * collective dxpl, a non-blocking collective dxpl, and
+ * an independent dxpl. In the serial build, all three
+ * IDs simply alias H5P_DATASET_XFER_DEFAULT.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, July 18, 2002
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5AC2_init_interface(void)
+{
+#ifdef H5_HAVE_PARALLEL
+ H5P_genclass_t *xfer_pclass; /* Dataset transfer property list class object */
+ H5P_genplist_t *xfer_plist; /* Dataset transfer property list object */
+ unsigned block_before_meta_write; /* "block before meta write" property value */
+ unsigned library_internal=1; /* "library internal" property value */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5AC2_init_interface)
+
+ /* Sanity check */
+ HDassert(H5P_CLS_DATASET_XFER_g!=(-1));
+
+ /* Get the dataset transfer property list class object */
+ if (NULL == (xfer_pclass = H5I_object(H5P_CLS_DATASET_XFER_g)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list class")
+
+ /* Get an ID for the blocking, collective H5AC2 dxpl */
+ if ((H5AC2_dxpl_id=H5P_create_id(xfer_pclass)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "unable to register property list")
+
+ /* Get the property list object */
+ if (NULL == (xfer_plist = H5I_object(H5AC2_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+
+ /* Insert 'block before metadata write' property */
+ block_before_meta_write=1;
+ if(H5P_insert(xfer_plist,H5AC2_BLOCK_BEFORE_META_WRITE_NAME,H5AC2_BLOCK_BEFORE_META_WRITE_SIZE,&block_before_meta_write,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Insert 'library internal' property */
+ if(H5P_insert(xfer_plist,H5AC2_LIBRARY_INTERNAL_NAME,H5AC2_LIBRARY_INTERNAL_SIZE,&library_internal,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Set the transfer mode */
+ xfer_mode=H5FD_MPIO_COLLECTIVE;
+ if (H5P_set(xfer_plist,H5D_XFER_IO_XFER_MODE_NAME,&xfer_mode)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value")
+
+ /* Get an ID for the non-blocking, collective H5AC2 dxpl */
+ if ((H5AC2_noblock_dxpl_id=H5P_create_id(xfer_pclass)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "unable to register property list")
+
+ /* Get the property list object */
+ if (NULL == (xfer_plist = H5I_object(H5AC2_noblock_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+
+ /* Insert 'block before metadata write' property */
+ block_before_meta_write=0;
+ if(H5P_insert(xfer_plist,H5AC2_BLOCK_BEFORE_META_WRITE_NAME,H5AC2_BLOCK_BEFORE_META_WRITE_SIZE,&block_before_meta_write,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Insert 'library internal' property */
+ if(H5P_insert(xfer_plist,H5AC2_LIBRARY_INTERNAL_NAME,H5AC2_LIBRARY_INTERNAL_SIZE,&library_internal,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Set the transfer mode */
+ xfer_mode=H5FD_MPIO_COLLECTIVE;
+ if (H5P_set(xfer_plist,H5D_XFER_IO_XFER_MODE_NAME,&xfer_mode)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value")
+
+ /* Get an ID for the non-blocking, independent H5AC2 dxpl */
+ if ((H5AC2_ind_dxpl_id=H5P_create_id(xfer_pclass)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "unable to register property list")
+
+ /* Get the property list object */
+ if (NULL == (xfer_plist = H5I_object(H5AC2_ind_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+
+ /* Insert 'block before metadata write' property */
+ block_before_meta_write=0;
+ if(H5P_insert(xfer_plist,H5AC2_BLOCK_BEFORE_META_WRITE_NAME,H5AC2_BLOCK_BEFORE_META_WRITE_SIZE,&block_before_meta_write,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Insert 'library internal' property */
+ if(H5P_insert(xfer_plist,H5AC2_LIBRARY_INTERNAL_NAME,H5AC2_LIBRARY_INTERNAL_SIZE,&library_internal,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+
+ /* Set the transfer mode */
+ xfer_mode=H5FD_MPIO_INDEPENDENT;
+ if (H5P_set(xfer_plist,H5D_XFER_IO_XFER_MODE_NAME,&xfer_mode)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+#else /* H5_HAVE_PARALLEL */
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5AC2_init_interface)
+
+ /* Sanity check */
+ /* NOTE(review): the parallel branch checks H5P_CLS_DATASET_XFER_g (the
+ * class) while this branch checks H5P_LST_DATASET_XFER_g (the default
+ * list) -- confirm the asymmetry is intentional.
+ */
+ assert(H5P_LST_DATASET_XFER_g!=(-1));
+
+ H5AC2_dxpl_id=H5P_DATASET_XFER_DEFAULT;
+ H5AC2_noblock_dxpl_id=H5P_DATASET_XFER_DEFAULT;
+ H5AC2_ind_dxpl_id=H5P_DATASET_XFER_DEFAULT;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+#endif /* H5_HAVE_PARALLEL */
+} /* end H5AC2_init_interface() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_term_interface
+ *
+ * Purpose: Terminate this interface.
+ *
+ * Return: Success: Positive if anything was done that might
+ * affect other interfaces; zero otherwise.
+ *
+ * Failure: Negative.
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, July 18, 2002
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+H5AC2_term_interface(void)
+{
+ int n=0;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5AC2_term_interface)
+
+ if (H5_interface_initialize_g) {
+#ifdef H5_HAVE_PARALLEL
+ if(H5AC2_dxpl_id>0 || H5AC2_noblock_dxpl_id>0 || H5AC2_ind_dxpl_id>0) {
+ /* Indicate more work to do */
+ n = 1; /* H5I */
+
+ /* Close H5AC2 dxpl */
+ if (H5I_dec_ref(H5AC2_dxpl_id) < 0 ||
+ H5I_dec_ref(H5AC2_noblock_dxpl_id) < 0 ||
+ H5I_dec_ref(H5AC2_ind_dxpl_id) < 0)
+ H5E_clear_stack(NULL); /*ignore error*/
+ else {
+ /* Reset static IDs */
+ H5AC2_dxpl_id=(-1);
+ H5AC2_noblock_dxpl_id=(-1);
+ H5AC2_ind_dxpl_id=(-1);
+
+ /* Reset interface initialization flag */
+ H5_interface_initialize_g = 0;
+ } /* end else */
+ } /* end if */
+ else
+#else /* H5_HAVE_PARALLEL */
+ /* Reset static IDs */
+ H5AC2_dxpl_id=(-1);
+ H5AC2_noblock_dxpl_id=(-1);
+ H5AC2_ind_dxpl_id=(-1);
+
+#endif /* H5_HAVE_PARALLEL */
+ /* Reset interface initialization flag.
+ *
+ * Note the deliberate construction here: in the parallel build the
+ * dangling "else" above binds to the statement below this comment,
+ * so the flag is reset only when the dxpl IDs were never created;
+ * in the serial build the statement executes unconditionally.
+ */
+ H5_interface_initialize_g = 0;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(n)
+} /* end H5AC2_term_interface() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_create
+ *
+ * Purpose: Initialize the cache just after a file is opened. The
+ * SIZE_HINT is the number of cache slots desired. If you
+ * pass an invalid value then H5AC2_NSLOTS is used. You can
+ * turn off caching by using 1 for the SIZE_HINT value.
+ *
+ * Return: Success: Number of slots actually used.
+ *
+ * Failure: Negative
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 9 1997
+ *
+ * Modifications:
+ *
+ * Complete re-design and re-write to support the re-designed
+ * metadata cache.
+ *
+ * At present, the size_hint is ignored, and the
+ * max_cache_size and min_clean_size fields are hard
+ * coded. This should be fixed, but a parameter
+ * list change will be required, so I will leave it
+ * for now.
+ *
+ * Since no-one seems to care, the function now returns
+ * one on success.
+ * JRM - 4/28/04
+ *
+ * Reworked the function again after abstracting its guts to
+ * the similar function in H5C2.c. The function is now a
+ * wrapper for H5C2_create().
+ * JRM - 6/4/04
+ *
+ * Deleted the old size_hint parameter and added the
+ * max_cache_size, and min_clean_size parameters.
+ *
+ * JRM - 3/10/05
+ *
+ * Deleted the max_cache_size, and min_clean_size parameters,
+ * and added the config_ptr parameter. Added code to
+ * validate the resize configuration before we do anything.
+ *
+ * JRM - 3/24/05
+ *
+ * Changed the type of config_ptr from H5AC2_auto_size_ctl_t *
+ * to H5AC2_cache_config_t *. Propagated associated changes
+ * through the function.
+ * JRM - 4/7/05
+ *
+ * Added code allocating and initializing the auxilary
+ * structure (an instance of H5AC2_aux_t), and linking it
+ * to the instance of H5C2_t created by H5C2_create(). At
+ * present, the auxilary structure is only used in PHDF5.
+ *
+ * JRM - 6/28/05
+ *
+ * Added code to set the prefix if required.
+ *
+ * JRM - 1/20/06
+ *
+ * Added code to initialize the new write_done field.
+ *
+ * JRM - 5/11/06
+ *
+ * Reworked code to conform with changes in the cache
+ * API.
+ * JRM - 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Human readable names for the metadata cache entry types. The order
+ * must match the H5AC2 entry type id enumeration -- H5AC2_create()
+ * asserts that the table contains exactly H5AC2_NTYPES entries.
+ */
+static const char * H5AC2_entry_type_names[H5AC2_NTYPES] =
+{
+ "B-tree nodes",
+ "symbol table nodes",
+ "local heaps",
+ "global heaps",
+ "object headers",
+ "v2 B-tree headers",
+ "v2 B-tree internal nodes",
+ "v2 B-tree leaf nodes",
+ "fractal heap headers",
+ "fractal heap direct blocks",
+ "fractal heap indirect blocks",
+ "free space headers",
+ "free space sections",
+ "shared OH message master table",
+ "shared OH message index",
+ "test entry" /* for testing only -- not used for actual files */
+};
+
+herr_t
+H5AC2_create(const H5F_t *f,
+ H5AC2_cache_config_t *config_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ herr_t result;
+#ifdef H5_HAVE_PARALLEL
+ char prefix[H5C2__PREFIX_LEN] = "";
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ int mpi_rank = -1;
+ int mpi_size = -1;
+ H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ FUNC_ENTER_NOAPI(H5AC2_create, FAIL)
+
+ HDassert ( f );
+ HDassert ( NULL == f->shared->cache2 );
+ HDassert ( config_ptr != NULL ) ;
+ HDassert ( NELMTS(H5AC2_entry_type_names) == H5AC2_NTYPES);
+ HDassert ( H5C2__MAX_NUM_TYPE_IDS == H5AC2_NTYPES);
+
+ /* Reject a bad resize configuration before allocating anything */
+ result = H5AC2_validate_config(config_ptr);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration");
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ if ( IS_H5FD_MPI(f) ) {
+
+ if ( (mpi_comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL ) {
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, \
+ "can't get MPI communicator")
+ }
+
+ if ( (mpi_rank = H5F_mpi_get_rank(f)) < 0 ) {
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
+ }
+
+ if ( (mpi_size = H5F_mpi_get_size(f)) < 0 ) {
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
+ }
+
+ /* There is no point in setting up the auxilary structure if size
+ * is less than or equal to 1, as there will never be any processes
+ * to broadcast the clean lists to.
+ */
+ if ( mpi_size > 1 ) {
+
+ if ( NULL == (aux_ptr = H5FL_CALLOC(H5AC2_aux_t)) ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "Can't allocate H5AC2 auxilary structure.")
+
+ } else {
+
+ aux_ptr->magic = H5AC2__H5AC2_AUX_T_MAGIC;
+ aux_ptr->mpi_comm = mpi_comm;
+ aux_ptr->mpi_rank = mpi_rank;
+ aux_ptr->mpi_size = mpi_size;
+ aux_ptr->write_permitted = FALSE;
+ aux_ptr->dirty_bytes_threshold =
+ H5AC2__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ aux_ptr->dirty_bytes = 0;
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+ aux_ptr->dirty_bytes_propagations = 0;
+ aux_ptr->unprotect_dirty_bytes = 0;
+ aux_ptr->unprotect_dirty_bytes_updates = 0;
+ aux_ptr->insert_dirty_bytes = 0;
+ aux_ptr->insert_dirty_bytes_updates = 0;
+ aux_ptr->rename_dirty_bytes = 0;
+ aux_ptr->rename_dirty_bytes_updates = 0;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+ aux_ptr->d_slist_ptr = NULL;
+ aux_ptr->d_slist_len = 0;
+ aux_ptr->c_slist_ptr = NULL;
+ aux_ptr->c_slist_len = 0;
+ aux_ptr->write_done = NULL;
+
+ /* prefix is H5C2__PREFIX_LEN bytes -- assumes "<rank>:"
+ * always fits. TODO confirm for very large ranks.
+ */
+ sprintf(prefix, "%d:", mpi_rank);
+ }
+
+ /* only process 0 maintains the dirtied/cleaned entry lists */
+ if ( mpi_rank == 0 ) {
+
+ aux_ptr->d_slist_ptr =
+ H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
+
+ if ( aux_ptr->d_slist_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ "can't create dirtied entry list.")
+ }
+
+ aux_ptr->c_slist_ptr =
+ H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
+
+ if ( aux_ptr->c_slist_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ "can't create cleaned entry list.")
+ }
+ }
+ }
+
+ if ( aux_ptr != NULL ) {
+
+ if ( aux_ptr->mpi_rank == 0 ) {
+
+ f->shared->cache2 = H5C2_create(f,
+ H5AC2__DEFAULT_MAX_CACHE_SIZE,
+ H5AC2__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC2_NTYPES - 1),
+ (const char **)H5AC2_entry_type_names,
+ H5AC2_check_if_write_permitted,
+ TRUE,
+ H5AC2_log_flushed_entry,
+ (void *)aux_ptr);
+
+ } else {
+
+ f->shared->cache2 = H5C2_create(f,
+ H5AC2__DEFAULT_MAX_CACHE_SIZE,
+ H5AC2__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC2_NTYPES - 1),
+ (const char **)H5AC2_entry_type_names,
+ NULL,
+ FALSE,
+#if 0 /* this is useful debugging code -- keep it for a while */ /* JRM */
+ H5AC2_log_flushed_entry_dummy,
+#else /* JRM */
+ NULL,
+#endif /* JRM */
+ (void *)aux_ptr);
+ }
+
+ } else {
+
+ f->shared->cache2 = H5C2_create(f,
+ H5AC2__DEFAULT_MAX_CACHE_SIZE,
+ H5AC2__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC2_NTYPES - 1),
+ (const char **)H5AC2_entry_type_names,
+ H5AC2_check_if_write_permitted,
+ TRUE,
+ NULL,
+ NULL);
+ }
+ } else {
+#endif /* H5_HAVE_PARALLEL */
+ /* The default max cache size and min clean size will frequently be
+ * overwritten shortly by the subsequent set resize config call.
+ * -- JRM
+ */
+
+ f->shared->cache2 = H5C2_create(f,
+ H5AC2__DEFAULT_MAX_CACHE_SIZE,
+ H5AC2__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC2_NTYPES - 1),
+ (const char **)H5AC2_entry_type_names,
+ H5AC2_check_if_write_permitted,
+ TRUE,
+ NULL,
+ NULL);
+#ifdef H5_HAVE_PARALLEL
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ if ( NULL == f->shared->cache2 ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ }
+#ifdef H5_HAVE_PARALLEL
+ else if ( aux_ptr != NULL ) {
+
+ result = H5C2_set_prefix(f->shared->cache2, prefix);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "H5C2_set_prefix() failed")
+ }
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ result = H5AC2_set_cache_auto_resize_config(f->shared->cache2, config_ptr);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "auto resize configuration failed")
+ }
+
+done:
+
+#ifdef H5_HAVE_PARALLEL
+
+ /* if there is a failure, try to tidy up the auxilary structure */
+
+ /* NOTE(review): if the failure occurs after H5C2_create() has already
+ * stored aux_ptr in the new cache (e.g. H5C2_set_prefix() or the auto
+ * resize config call fails), f->shared->cache2 is left holding a
+ * pointer to the structure freed below -- confirm callers destroy the
+ * cache on error before it can be used.
+ */
+ if ( ret_value != SUCCEED ) {
+
+ if ( aux_ptr != NULL ) {
+
+ if ( aux_ptr->d_slist_ptr != NULL ) {
+
+ H5SL_close(aux_ptr->d_slist_ptr);
+ }
+
+ if ( aux_ptr->c_slist_ptr != NULL ) {
+
+ H5SL_close(aux_ptr->c_slist_ptr);
+ }
+
+ aux_ptr->magic = 0;
+ H5FL_FREE(H5AC2_aux_t, aux_ptr);
+ aux_ptr = NULL;
+ }
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_dest
+ *
+ * Purpose: Flushes all data to disk and destroys the cache.
+ * This function fails if any object are protected since the
+ * resulting file might not be consistent.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 9 1997
+ *
+ * Modifications:
+ *
+ * Complete re-design and re-write to support the re-designed
+ * metadata cache.
+ * JRM - 5/12/04
+ *
+ * Abstracted the guts of the function to H5C_dest() in H5C.c,
+ * and then re-wrote the function as a wrapper for H5C_dest().
+ *
+ * JRM - 6/7/04
+ *
+ * Added code to free the auxiliary structure and its
+ * associated slist if present.
+ * JRM - 6/28/05
+ *
+ * Added code to close the trace file if it is present.
+ *
+ * JRM - 6/8/06
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_dest(H5F_t *f, hid_t dxpl_id)
+{
+ H5AC2_t *cache = NULL;
+ herr_t ret_value=SUCCEED; /* Return value */
+#ifdef H5_HAVE_PARALLEL
+ H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ FUNC_ENTER_NOAPI(H5AC2_dest, FAIL)
+
+ assert(f);
+ assert(f->shared->cache2);
+ cache = f->shared->cache2;
+#ifdef H5_HAVE_PARALLEL
+ /* stash the aux structure pointer -- it must outlive H5C2_dest() so
+ * it can be torn down below.
+ */
+ aux_ptr = cache->aux_ptr;
+
+ if ( aux_ptr != NULL ) {
+
+ HDassert ( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+#if H5AC2__TRACE_FILE_ENABLED
+ if ( H5AC2_close_trace_file(cache) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC2_close_trace_file() failed.")
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ if ( H5C2_dest(cache, dxpl_id) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache")
+ }
+
+ f->shared->cache2 = NULL;
+
+#ifdef H5_HAVE_PARALLEL
+ /* tear down the auxilary structure now that the cache is gone */
+ if ( aux_ptr != NULL ) {
+
+ if ( aux_ptr->d_slist_ptr != NULL ) {
+
+ H5SL_close(aux_ptr->d_slist_ptr);
+ }
+
+ if ( aux_ptr->c_slist_ptr != NULL ) {
+
+ H5SL_close(aux_ptr->c_slist_ptr);
+ }
+
+ aux_ptr->magic = 0;
+ H5FL_FREE(H5AC2_aux_t, aux_ptr);
+ aux_ptr = NULL;
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_dest() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_expunge_entry
+ *
+ * Purpose: Expunge the target entry from the cache without writing it
+ * to disk even if it is dirty. The entry must not be either
+ * pinned or protected.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/30/06
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_expunge_entry(H5F_t *f,
+ hid_t dxpl_id,
+ const H5AC2_class_t *type,
+ haddr_t addr)
+{
+ herr_t result;
+ herr_t ret_value=SUCCEED; /* Return value */
+ H5AC2_t * cache_ptr = NULL;
+#if H5AC2__TRACE_FILE_ENABLED
+ char trace[128] = "";
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_ENTER_NOAPI(H5AC2_expunge_entry, FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache2);
+ HDassert(type);
+ HDassert(H5F_addr_defined(addr));
+
+ cache_ptr = f->shared->cache2;
+
+#if H5AC2__TRACE_FILE_ENABLED
+ /* For the expunge entry call, only the addr, and type id are really
+ * necessary in the trace file. Write the return value to catch occult
+ * errors.
+ */
+ if ( ( cache_ptr != NULL ) &&
+ ( H5C2_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
+ ( trace_file_ptr != NULL ) ) {
+
+ sprintf(trace, "H5AC2_expunge_entry 0x%lx %d",
+ (unsigned long)addr,
+ (int)(type->id));
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ /* the real work is done in H5C2 -- this function is a thin wrapper */
+ result = H5C2_expunge_entry(cache_ptr,
+ dxpl_id,
+ type,
+ addr);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
+ "H5C2_expunge_entry() failed.")
+ }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+ if ( trace_file_ptr != NULL ) {
+
+ HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_expunge_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_flush
+ *
+ * Purpose: Flush (and possibly destroy) the metadata cache associated
+ * with the specified file.
+ *
+ * This is a re-write of an earlier version of the function
+ * which was reputedly capable of flushing (and destroying
+ * if requested) individual entries, individual entries if
+ * they match the supplied type, all entries of a given type,
+ * as well as all entries in the cache.
+ *
+ * As only this last capability is actually used at present,
+ * I have not implemented the other capabilities in this
+ * version of the function.
+ *
+ * The type and addr parameters are retained to avoid source
+ * code changed, but values other than NULL and HADDR_UNDEF
+ * respectively are errors. If all goes well, they should
+ * be removed, and the function renamed to something more
+ * descriptive -- perhaps H5AC2_flush_cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be flushed. However
+ * all unprotected entries should be flushed before the
+ * function returns failure.
+ *
+ * For historical purposes, the original version of the
+ * purpose section is reproduced below:
+ *
+ * ============ Original Version of "Purpose:" ============
+ *
+ * Flushes (and destroys if DESTROY is non-zero) the specified
+ * entry from the cache. If the entry TYPE is CACHE_FREE and
+ * ADDR is HADDR_UNDEF then all types of entries are
+ * flushed. If TYPE is CACHE_FREE and ADDR is defined then
+ * whatever is cached at ADDR is flushed. Otherwise the thing
+ * at ADDR is flushed if it is the correct type.
+ *
+ * If there are protected objects they will not be flushed.
+ * However, an attempt will be made to flush all non-protected
+ * items before this function returns failure.
+ *
+ * Return: Non-negative on success/Negative on failure if there was a
+ * request to flush all items and something was protected.
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 9 1997
+ *
+ * Modifications:
+ * Robb Matzke, 1999-07-27
+ * The ADDR argument is passed by value.
+ *
+ * Complete re-write. See above for details. -- JRM 5/11/04
+ *
+ * Abstracted the guts of the function to H5C_flush_cache()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_flush_cache().
+ *
+ * JRM - 6/7/04
+ *
+ * JRM - 7/5/05
+ * Modified function as part of a fix for a cache coherency
+ * bug in PHDF5. See the header comments on the H5AC2_aux_t
+ * structure for details.
+ *
+ * JRM -- 5/11/06
+ * Added call to the write_done callback.
+ *
+ * JRM -- 6/6/06
+ * Added trace file support.
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
+{
+ herr_t status;
+ herr_t ret_value = SUCCEED; /* Return value */
+#ifdef H5_HAVE_PARALLEL
+ H5AC2_aux_t * aux_ptr = NULL;
+ int mpi_code;
+#endif /* H5_HAVE_PARALLEL */
+#if H5AC2__TRACE_FILE_ENABLED
+ char trace[128] = "";
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+
+ FUNC_ENTER_NOAPI(H5AC2_flush, FAIL)
+
+ HDassert(f);
+ HDassert(f->shared->cache2);
+
+#if H5AC2__TRACE_FILE_ENABLED
+ /* For the flush, only the flags are really necessary in the trace file.
+ * Write the result to catch occult errors.
+ */
+ if ( ( f != NULL ) &&
+ ( f->shared != NULL ) &&
+ ( f->shared->cache2 != NULL ) &&
+ ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+ ( trace_file_ptr != NULL ) ) {
+
+ sprintf(trace, "H5AC2_flush 0x%x", flags);
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+ aux_ptr = f->shared->cache2->aux_ptr;
+
+ if ( aux_ptr != NULL ) {
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+ HDfprintf(stdout,
+ "%d::H5AC2_flush: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
+ (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->unprotect_dirty_bytes),
+ (int)(aux_ptr->unprotect_dirty_bytes_updates),
+ (int)(aux_ptr->insert_dirty_bytes),
+ (int)(aux_ptr->insert_dirty_bytes_updates),
+ (int)(aux_ptr->rename_dirty_bytes),
+ (int)(aux_ptr->rename_dirty_bytes_updates));
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+
+ /* to prevent "messages from the future" we must synchronize all
+ * processes before we start the flush. Hence the following
+ * barrier.
+ */
+ if ( MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)) ) {
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
+ }
+
+ /* if the clear only flag is set, this flush will not involve any
+ * disk I/O. In such cases, it is not necessary to let process 0
+ * flush first.
+ */
+ if ( ( aux_ptr->mpi_rank == 0 ) &&
+ ( (flags & H5AC2__FLUSH_CLEAR_ONLY_FLAG) == 0 ) ) {
+
+ unsigned init_flush_flags = H5AC2__NO_FLAGS_SET;
+
+ if ( ( (flags & H5AC2__FLUSH_MARKED_ENTRIES_FLAG) != 0 ) &&
+ ( (flags & H5AC2__FLUSH_INVALIDATE_FLAG) == 0 ) ) {
+
+ init_flush_flags |= H5AC2__FLUSH_MARKED_ENTRIES_FLAG;
+ }
+
+ aux_ptr->write_permitted = TRUE;
+
+ status = H5C2_flush_cache(f->shared->cache2,
+ dxpl_id,
+ init_flush_flags);
+
+ aux_ptr->write_permitted = FALSE;
+
+ if ( status < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+ }
+
+ if ( aux_ptr->write_done != NULL ) {
+
+ (aux_ptr->write_done)();
+ }
+
+ } /* end if ( aux_ptr->mpi_rank == 0 ) */
+
+ status = H5AC2_propagate_flushed_and_still_clean_entries_list(f,
+ H5AC2_noblock_dxpl_id,
+ f->shared->cache2,
+ FALSE);
+
+ /* Check the propagate result -- previously this status was
+ * silently discarded, hiding any failure to distribute the
+ * clean entries list.
+ */
+ if ( status < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't propagate clean entries list.")
+ }
+ } /* end if ( aux_ptr != NULL ) */
+#endif /* H5_HAVE_PARALLEL */
+
+ status = H5C2_flush_cache(f->shared->cache2,
+ dxpl_id,
+ flags);
+
+ if ( status < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+ }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+ if ( trace_file_ptr != NULL ) {
+
+ HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_flush() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_get_entry_status
+ *
+ * Purpose:     Given a file address, determine whether the metadata
+ *              cache contains an entry at that location.  If it does,
+ *              report whether the entry is dirty, protected, pinned,
+ *              etc. via the bit vector returned in *status_ptr.  If no
+ *              entry exists at the given address, *status_ptr is set
+ *              to zero.
+ *
+ *              On error, the value of *status_ptr is undefined.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              4/27/06
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_get_entry_status(H5F_t * f,
+                       haddr_t addr,
+                       unsigned * status_ptr)
+{
+    H5C2_t * cache_ptr = f->shared->cache2;
+    herr_t   ret_value = SUCCEED;      /* Return value */
+    hbool_t  in_cache = FALSE;
+    hbool_t  is_dirty = FALSE;
+    hbool_t  is_protected = FALSE;
+    hbool_t  is_pinned = FALSE;
+    size_t   entry_size = 0;
+    unsigned entry_status = 0;
+
+    FUNC_ENTER_NOAPI(H5AC2_get_entry_status, FAIL)
+
+    /* Reject bad parameters up front. */
+    if ( ( cache_ptr == NULL ) ||
+         ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+         ( ! H5F_addr_defined(addr) ) ||
+         ( status_ptr == NULL ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
+    }
+
+    /* Query the cache proper for the state of the entry (if any). */
+    if ( H5C2_get_entry_status(cache_ptr, addr, &entry_size, &in_cache,
+                               &is_dirty, &is_protected, &is_pinned) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "H5C2_get_entry_status() failed.")
+    }
+
+    /* Fold the individual state flags into the status bit vector.  If
+     * the entry is not resident in the cache, the status stays zero.
+     */
+    if ( in_cache ) {
+
+        entry_status = H5AC2_ES__IN_CACHE
+                       | ( is_dirty     ? H5AC2_ES__IS_DIRTY     : 0 )
+                       | ( is_protected ? H5AC2_ES__IS_PROTECTED : 0 )
+                       | ( is_pinned    ? H5AC2_ES__IS_PINNED    : 0 );
+    }
+
+    *status_ptr = entry_status;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_get_entry_status() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_set
+ *
+ * Purpose:     Adds the specified thing to the cache. The thing need not
+ *              exist on disk yet, but it must have an address and disk
+ *              space reserved.
+ *
+ *              If H5AC2_DEBUG is defined then this function checks
+ *              that the object being inserted isn't a protected object.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Robb Matzke
+ *              matzke@llnl.gov
+ *              Jul 9 1997
+ *
+ * Modifications:
+ *              Robb Matzke, 1999-07-27
+ *              The ADDR argument is passed by value.
+ *
+ *              Bill Wendling, 2003-09-16
+ *              Added automatic "flush" if the FPHDF5 driver is being
+ *              used. This'll write the metadata to the SAP where other,
+ *              lesser processes can grab it.
+ *
+ *              JRM - 5/13/04
+ *              Complete re-write for the new metadata cache.  The new
+ *              code is functionally almost identical to the old, although
+ *              the sanity check for a protected entry is now an assert
+ *              at the beginning of the function.
+ *
+ *              JRM - 6/7/04
+ *              Abstracted the guts of the function to H5C_insert_entry()
+ *              in H5C.c, and then re-wrote the function as a wrapper for
+ *              H5C_insert_entry().
+ *
+ *              JRM - 1/6/05
+ *              Added the flags parameter.  At present, this parameter is
+ *              only used to set the new flush_marker field on the new
+ *              entry.  Since this doesn't apply to the SAP code, no change
+ *              is needed there.  Thus the only change to the body of the
+ *              code is to pass the flags parameter through to
+ *              H5C_insert_entry().
+ *
+ *              JRM - 6/6/05
+ *              Added code to force newly inserted entries to be dirty
+ *              in the flexible parallel case.  The normal case is handled
+ *              in H5C.c.  This is part of a series of changes directed at
+ *              moving management of the dirty flag on cache entries into
+ *              the cache code.
+ *
+ *              JRM - 7/5/05
+ *              Added code to track dirty byte generation, and to trigger
+ *              clean entry list propagation when it exceeds a user
+ *              specified threshold.  Note that this code only applies in
+ *              the PHDF5 case.  It should have no effect on either the
+ *              serial or FPHDF5 cases.
+ *
+ *              JRM - 6/6/06
+ *              Added trace file support.
+ *
+ *              JRM - 10/18/07
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_set(H5F_t *f, hid_t dxpl_id, const H5AC2_class_t *type, haddr_t addr, size_t len, void *thing, unsigned int flags)
+{
+    herr_t result;
+    H5AC2_info_t *info;
+    H5AC2_t *cache;
+    herr_t ret_value=SUCCEED; /* Return value */
+#ifdef H5_HAVE_PARALLEL
+    H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+#if H5AC2__TRACE_FILE_ENABLED
+    char trace[128] = "";
+    size_t trace_entry_size = 0;
+    FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_set, FAIL)
+
+    /* sanity checks */
+    HDassert(f);
+    HDassert(f->shared->cache2);
+    HDassert(type);
+    HDassert(type->serialize);
+    HDassert(H5F_addr_defined(addr));
+    HDassert(thing);
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the insert, only the addr, size, type id and flags are really
+     * necessary in the trace file. Write the result to catch occult
+     * errors.
+     *
+     * Note that some data is not available right now -- put what we can
+     * in the trace buffer now, and fill in the rest at the end.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_set 0x%lx %ld %d 0x%x",
+                (unsigned long)addr,
+                (long)len,
+                type->id,
+                flags);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    /* Get local copy of this information */
+    cache = f->shared->cache2;
+    info = (H5AC2_info_t *)thing;
+
+    /* Initialize the cache related fields in the entry's header. */
+    info->addr = addr;
+    info->type = type;
+    info->is_protected = FALSE;
+
+#ifdef H5_HAVE_PARALLEL
+    /* In PHDF5, record the insertion via H5AC2_log_inserted_entry()
+     * before inserting it into the cache proper -- presumably so the
+     * dirty byte accounting (used below) stays consistent.  TODO:
+     * confirm against H5AC2_log_inserted_entry().
+     */
+    if ( NULL != (aux_ptr = f->shared->cache2->aux_ptr) ) {
+
+        result = H5AC2_log_inserted_entry(f,
+                                          f->shared->cache2,
+                                          (H5AC2_info_t *)thing,
+                                          type,
+                                          addr,
+                                          len);
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+                        "H5AC2_log_inserted_entry() failed.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    /* Insert the entry into the cache proper. */
+    result = H5C2_insert_entry(cache,
+                               dxpl_id,
+                               type,
+                               addr,
+                               len,
+                               thing,
+                               flags);
+
+    if ( result < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C2_insert_entry() failed")
+    }
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        /* make note of the entry size */
+        trace_entry_size = ((H5C2_cache_entry_t *)thing)->size;
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+    /* If the dirty bytes count has reached the user specified threshold,
+     * propagate the flushed and still clean entries list now.
+     */
+    if ( ( aux_ptr != NULL ) &&
+         ( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) ) {
+
+        result = H5AC2_propagate_flushed_and_still_clean_entries_list(f,
+                                                  H5AC2_noblock_dxpl_id,
+                                                  f->shared->cache2,
+                                                  TRUE);
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                        "Can't propagate clean entries list.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
+                  (int)trace_entry_size,
+                  (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_set() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_mark_pinned_entry_dirty
+ *
+ * Purpose:     Mark a pinned entry as dirty.  The target entry MUST be
+ *              pinned, and MUST be unprotected.
+ *
+ *              If the entry has changed size, the function updates
+ *              data structures for the size change.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              4/11/06
+ *
+ * Modifications:
+ *
+ *              Added trace file support.     JRM -- 6/6/06
+ *
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.    JRM -- 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_mark_pinned_entry_dirty(H5F_t * f,
+                              void * thing,
+                              hbool_t size_changed,
+                              size_t new_size)
+{
+    H5C2_t *cache_ptr = f->shared->cache2;
+    herr_t result;
+    herr_t ret_value = SUCCEED; /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char trace[128] = "";
+    FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_mark_pinned_entry_dirty, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the mark pinned entry dirty call, only the addr, size_changed,
+     * and new_size are really necessary in the trace file. Write the result
+     * to catch occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_mark_pinned_entry_dirty 0x%lx %d %d",
+                (unsigned long)(((H5C2_cache_entry_t *)thing)->addr),
+                (int)size_changed,
+                (int)new_size);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    /* In PHDF5, a clean entry that is about to be marked dirty must be
+     * recorded via H5AC2_log_dirtied_entry().  Sanity check the entry's
+     * pinned/protected state before doing so.
+     */
+    if ( ( ((H5AC2_info_t *)thing)->is_dirty == FALSE ) &&
+         ( NULL != cache_ptr->aux_ptr) ) {
+
+        H5AC2_info_t * entry_ptr;
+
+        HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) );
+
+        entry_ptr = (H5AC2_info_t *)thing;
+
+        if ( ! ( entry_ptr->is_pinned ) ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                            "Entry isn't pinned??")
+        }
+
+        if ( entry_ptr->is_protected ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                        "Entry is protected??")
+        }
+
+        result = H5AC2_log_dirtied_entry(cache_ptr,
+                                         entry_ptr,
+                                         entry_ptr->addr,
+                                         size_changed,
+                                         new_size);
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                        "H5AC2_log_dirtied_entry() failed.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    /* Mark the entry dirty in the cache proper. */
+    result = H5C2_mark_pinned_entry_dirty(cache_ptr,
+                                          thing,
+                                          size_changed,
+                                          new_size);
+    if ( result < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                    "H5C2_mark_pinned_entry_dirty() failed.")
+
+    }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_mark_pinned_entry_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_mark_pinned_or_protected_entry_dirty
+ *
+ * Purpose:     Mark a pinned or protected entry as dirty.  The target
+ *              entry MUST be either pinned, protected, or both.
+ *
+ *              Unlike H5AC2_mark_pinned_entry_dirty(), this function does
+ *              not support size changes.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              5/16/06
+ *
+ * Modifications:
+ *
+ *              Added trace file support.     JRM -- 6/6/06
+ *
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.     JRM -- 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_mark_pinned_or_protected_entry_dirty(H5F_t * f,
+                                           void * thing)
+{
+    H5C2_t * cache_ptr = f->shared->cache2;
+#ifdef H5_HAVE_PARALLEL
+    H5AC2_info_t * info_ptr;
+#endif /* H5_HAVE_PARALLEL */
+    herr_t result;
+    herr_t ret_value = SUCCEED;      /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char trace[128] = "";
+    FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_mark_pinned_or_protected_entry_dirty, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the mark pinned or protected entry dirty call, only the addr
+     * is really necessary in the trace file.  Write the result to catch
+     * occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_mark_pinned_or_protected_entry_dirty 0x%lx",
+                (unsigned long)(((H5C2_cache_entry_t *)thing)->addr));
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    info_ptr = (H5AC2_info_t *)thing;
+
+    /* In PHDF5, a clean, pinned, and unprotected entry that is about to
+     * become dirty must be recorded via H5AC2_log_dirtied_entry().
+     * Dirtying of protected entries is handled at unprotect time.
+     */
+    if ( ( info_ptr->is_dirty == FALSE ) &&
+         ( ! ( info_ptr->is_protected ) ) &&
+         ( info_ptr->is_pinned ) &&
+         ( NULL != cache_ptr->aux_ptr) ) {
+
+        result = H5AC2_log_dirtied_entry(cache_ptr,
+                                         info_ptr,
+                                         info_ptr->addr,
+                                         FALSE,
+                                         0);
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                        "H5AC2_log_dirtied_entry() failed.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    result = H5C2_mark_pinned_or_protected_entry_dirty(cache_ptr, thing);
+
+    if ( result < 0 ) {
+
+        /* Bug fix: the error message previously named the wrong
+         * function (H5C2_mark_pinned_entry_dirty()).
+         */
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                    "H5C2_mark_pinned_or_protected_entry_dirty() failed.")
+
+    }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_mark_pinned_or_protected_entry_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_rename
+ *
+ * Purpose:     Use this function to notify the cache that an object's
+ *              file address changed.
+ *
+ *              If H5AC2_DEBUG is defined then this function checks
+ *              that the old and new addresses don't correspond to the
+ *              address of a protected object.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Robb Matzke
+ *              matzke@llnl.gov
+ *              Jul 9 1997
+ *
+ * Modifications:
+ *              Robb Matzke, 1999-07-27
+ *              The OLD_ADDR and NEW_ADDR arguments are passed by value.
+ *
+ *              JRM 5/17/04
+ *              Complete rewrite for the new meta-data cache.
+ *
+ *              JRM - 6/7/04
+ *              Abstracted the guts of the function to H5C_rename_entry()
+ *              in H5C.c, and then re-wrote the function as a wrapper for
+ *              H5C_rename_entry().
+ *
+ *              JRM - 7/5/05
+ *              Added code to track dirty byte generation, and to trigger
+ *              clean entry list propagation when it exceeds a user
+ *              specified threshold.  Note that this code only applies in
+ *              the PHDF5 case.  It should have no effect on either the
+ *              serial or FPHDF5 cases.
+ *
+ *              Note that this code presumes that the renamed entry will
+ *              be present in all caches -- which it must be at present.
+ *              To maintain this invariant, only rename entries immediately
+ *              after you unprotect them.
+ *
+ *              JRM - 6/6/06
+ *              Added trace file support.
+ *
+ *              JRM - 10/18/07
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_rename(H5F_t *f, const H5AC2_class_t *type, haddr_t old_addr, haddr_t new_addr)
+{
+    herr_t result;
+    herr_t ret_value=SUCCEED;      /* Return value */
+#ifdef H5_HAVE_PARALLEL
+    H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+#if H5AC2__TRACE_FILE_ENABLED
+    char trace[128] = "";
+    FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_rename, FAIL)
+
+    /* sanity checks */
+    HDassert(f);
+    HDassert(f->shared->cache2);
+    HDassert(type);
+    HDassert(H5F_addr_defined(old_addr));
+    HDassert(H5F_addr_defined(new_addr));
+    HDassert(H5F_addr_ne(old_addr, new_addr));
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the rename call, only the old addr and new addr are really
+     * necessary in the trace file. Include the type id so we don't have to
+     * look it up. Also write the result to catch occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_rename 0x%lx 0x%lx %d",
+                (unsigned long)old_addr,
+                (unsigned long)new_addr,
+                (int)(type->id));
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+    /* In PHDF5, record the rename via H5AC2_log_renamed_entry() before
+     * applying it to the cache proper.
+     */
+    if ( NULL != (aux_ptr = f->shared->cache2->aux_ptr) ) {
+
+        result = H5AC2_log_renamed_entry(f->shared->cache2,
+                                         old_addr,
+                                         new_addr);
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                        "H5AC2_log_renamed_entry() failed.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    /* Apply the rename in the cache proper. */
+    result = H5C2_rename_entry(f->shared->cache2,
+                               type,
+                               old_addr,
+                               new_addr);
+
+    if ( result < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+                    "H5C2_rename_entry() failed.")
+    }
+
+#ifdef H5_HAVE_PARALLEL
+    /* If the dirty bytes count has reached the user specified threshold,
+     * propagate the flushed and still clean entries list now.
+     */
+    if ( ( aux_ptr != NULL ) &&
+         ( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) ) {
+
+        result = H5AC2_propagate_flushed_and_still_clean_entries_list(f,
+                                                  H5AC2_noblock_dxpl_id,
+                                                  f->shared->cache2,
+                                                  TRUE);
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                        "Can't propagate clean entries list.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_rename() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_pin_protected_entry()
+ *
+ * Purpose:     Pin a protected cache entry.  The entry must be protected
+ *              at the time of call, and must be unpinned.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              4/27/06
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_pin_protected_entry(H5F_t * f,
+                          void * thing)
+{
+    H5C2_t * cache_ptr = f->shared->cache2;
+    herr_t   ret_value = SUCCEED;      /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char     trace[128] = "";
+    FILE *   trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_pin_protected_entry, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* Only the target address is needed in the trace file for the pin
+     * protected entry call -- log the return value as well to catch
+     * occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_pin_protected_entry 0x%lx",
+                (unsigned long)(((H5C2_cache_entry_t *)thing)->addr));
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    /* The cache proper does all the real work. */
+    if ( H5C2_pin_protected_entry(cache_ptr, thing) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
+                    "H5C2_pin_protected_entry() failed.")
+    }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_pin_protected_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_protect
+ *
+ * Purpose:     If the target entry is not in the cache, load it.  If
+ *              necessary, attempt to evict one or more entries to keep
+ *              the cache within its maximum size.
+ *
+ *              Mark the target entry as protected, and return its address
+ *              to the caller.  The caller must call H5AC2_unprotect() when
+ *              finished with the entry.
+ *
+ *              While it is protected, the entry may not be either evicted
+ *              or flushed -- nor may it be accessed by another call to
+ *              H5AC2_protect.  Any attempt to do so will result in a failure.
+ *
+ * Return:      Success:        Ptr to the object.
+ *
+ *              Failure:        NULL
+ *
+ * Programmer:  Robb Matzke
+ *              matzke@llnl.gov
+ *              Sep 2 1997
+ *
+ * Modifications:
+ *              Robb Matzke, 1999-07-27
+ *              The ADDR argument is passed by value.
+ *
+ *              Bill Wendling, 2003-09-10
+ *              Added parameter to indicate whether this is a READ or
+ *              WRITE type of protect.
+ *
+ *              JRM -- 5/17/04
+ *              Complete re-write for the new client cache.
+ *
+ *              JRM - 6/7/04
+ *              Abstracted the guts of the function to H5C2_protect()
+ *              in H5C2.c, and then re-wrote the function as a wrapper for
+ *              H5C2_protect().
+ *
+ *              JRM - 6/6/06
+ *              Added trace file support.
+ *
+ *              JRM - 3/18/07
+ *              Modified code to support the new flags parameter for
+ *              H5C2_protect().  For now, that means passing in the
+ *              H5C2_READ_ONLY_FLAG if rw == H5AC2_READ.
+ *
+ *              Also updated the trace file output to save the
+ *              rw parameter, since we are now doing something with it.
+ *
+ *              JRM - 10/18/07
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+void *
+H5AC2_protect(H5F_t *f,
+              hid_t dxpl_id,
+              const H5AC2_class_t *type,
+              haddr_t addr,
+              size_t len,
+              const void *udata,
+              H5AC2_protect_t rw)
+{
+    unsigned             protect_flags = H5C2__NO_FLAGS_SET;
+    void *               thing = (void *)NULL;
+    void *               ret_value;      /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char                 trace[128] = "";
+    size_t               trace_entry_size = 0;
+    FILE *               trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_protect, NULL)
+
+    /* check args */
+    HDassert(f);
+    HDassert(f->shared);
+    HDassert(f->shared->cache2);
+    HDassert(type);
+    HDassert(type->serialize);
+    HDassert(H5F_addr_defined(addr));
+
+    /* Check for invalid access request */
+    if(0 == (f->intent & H5F_ACC_RDWR) && rw == H5AC2_WRITE)
+        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "no write intent on file")
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the protect call, only the addr and type id is really necessary
+     * in the trace file.  Include the size of the entry protected as a
+     * sanity check.  Also indicate whether the call was successful to
+     * catch occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        /* Bug fix: use const char * -- string literals are not
+         * modifiable, so a plain char * invites undefined behavior.
+         */
+        const char * rw_string;
+
+        if ( rw == H5AC2_WRITE ) {
+
+            rw_string = "H5AC2_WRITE";
+
+        } else if ( rw == H5AC2_READ ) {
+
+            rw_string = "H5AC2_READ";
+
+        } else {
+
+            rw_string = "???";
+        }
+
+        sprintf(trace, "H5AC2_protect 0x%lx %ld %d %s",
+                (unsigned long)addr,
+                (long)len,
+                (int)(type->id),
+                rw_string);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    /* A read-only protect maps onto the cache's read only flag. */
+    if ( rw == H5AC2_READ ) {
+
+        protect_flags |= H5C2__READ_ONLY_FLAG;
+    }
+
+    thing = H5C2_protect(f->shared->cache2,
+                         dxpl_id,
+                         type,
+                         addr,
+                         len,
+                         udata,
+                         protect_flags);
+
+    if ( thing == NULL ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C2_protect() failed.")
+    }
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        /* make note of the entry size */
+        trace_entry_size = ((H5C2_cache_entry_t *)thing)->size;
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    /* Set return value */
+    ret_value = thing;
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
+                  (int)trace_entry_size,
+                  (int)(ret_value != NULL));
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_protect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_resize_pinned_entry
+ *
+ * Purpose:     Resize a pinned entry.  The target entry MUST be
+ *              pinned, and MUST be unprotected.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              7/5/06
+ *
+ * Modifications:
+ *
+ *              JRM - 10/18/07
+ *              Modified code in support of revised cache API needed
+ *              to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_resize_pinned_entry(H5F_t * f,
+                          void *  thing,
+                          size_t  new_size)
+{
+    H5C2_t *cache_ptr = f->shared->cache2;
+    herr_t result;
+    herr_t ret_value = SUCCEED; /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char trace[128] = "";
+    FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_resize_pinned_entry, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* For the resize pinned entry call, only the addr, and new_size are
+     * really necessary in the trace file. Write the result to catch
+     * occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_resize_pinned_entry 0x%lx %d",
+                (unsigned long)(((H5C2_cache_entry_t *)thing)->addr),
+                (int)new_size);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+#ifdef H5_HAVE_PARALLEL
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    /* In PHDF5, a resize dirties the entry -- thus a clean entry must be
+     * recorded via H5AC2_log_dirtied_entry() (with the new size) before
+     * the resize proper.  Sanity check the entry's state first.
+     */
+    if ( ( ((H5AC2_info_t *)thing)->is_dirty == FALSE ) &&
+         ( NULL != cache_ptr->aux_ptr) ) {
+
+        H5AC2_info_t * entry_ptr;
+
+        entry_ptr = (H5AC2_info_t *)thing;
+
+        if ( ! ( entry_ptr->is_pinned ) ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                            "Entry isn't pinned??")
+        }
+
+        if ( entry_ptr->is_protected ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                        "Entry is protected??")
+        }
+
+        result = H5AC2_log_dirtied_entry(cache_ptr,
+                                         entry_ptr,
+                                         entry_ptr->addr,
+                                         TRUE,
+                                         new_size);
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                        "H5AC2_log_dirtied_entry() failed.")
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    /* Resize the entry in the cache proper. */
+    result = H5C2_resize_pinned_entry(cache_ptr,
+                                      thing,
+                                      new_size);
+    if ( result < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                    "H5C2_resize_pinned_entry() failed.")
+
+    }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_resize_pinned_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5AC2_unpin_entry()
+ *
+ * Purpose:     Unpin a cache entry.  The entry must be unprotected at
+ *              the time of call, and must be pinned.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              4/11/06
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_unpin_entry(H5F_t * f,
+                  void * thing)
+{
+    H5C2_t * cache_ptr = f->shared->cache2;
+    herr_t   ret_value = SUCCEED;      /* Return value */
+#if H5AC2__TRACE_FILE_ENABLED
+    char     trace[128] = "";
+    FILE *   trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_ENTER_NOAPI(H5AC2_unpin_entry, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+    /* Only the target address is needed in the trace file for the
+     * unpin entry call -- log the return value as well to catch
+     * occult errors.
+     */
+    if ( ( f != NULL ) &&
+         ( f->shared != NULL ) &&
+         ( f->shared->cache2 != NULL ) &&
+         ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+         ( trace_file_ptr != NULL ) ) {
+
+        sprintf(trace, "H5AC2_unpin_entry 0x%lx",
+                (unsigned long)(((H5C2_cache_entry_t *)thing)->addr));
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    /* The cache proper does all the real work. */
+    if ( H5C2_unpin_entry(cache_ptr, thing) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "H5C2_unpin_entry() failed.")
+    }
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+    if ( trace_file_ptr != NULL ) {
+
+        HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+    }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_unpin_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_unprotect
+ *
+ * Purpose: Undo an H5AC2_protect() call -- specifically, mark the
+ * entry as unprotected, remove it from the protected list,
+ * and give it back to the replacement policy.
+ *
+ * The TYPE and ADDR arguments must be the same as those in
+ * the corresponding call to H5AC2_protect() and the THING
+ * argument must be the value returned by that call to
+ * H5AC2_protect().
+ *
+ * If the deleted flag is TRUE, simply remove the target entry
+ * from the cache, clear it, and free it without writing it to
+ * disk.
+ *
+ * This verion of the function is a complete re-write to
+ * use the new metadata cache. While there isn't all that
+ * much difference between the old and new Purpose sections,
+ * the original version is given below.
+ *
+ * Original purpose section:
+ *
+ * This function should be called to undo the effect of
+ * H5AC2_protect(). The TYPE and ADDR arguments should be the
+ * same as the corresponding call to H5AC2_protect() and the
+ * THING argument should be the value returned by H5AC2_protect().
+ * If the DELETED flag is set, then this object has been deleted
+ * from the file and should not be returned to the cache.
+ *
+ * If H5AC2_DEBUG is defined then this function fails
+ * if the TYPE and ADDR arguments are not what was used when the
+ * object was protected or if the object was never protected.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Sep 2 1997
+ *
+ * Modifications:
+ * Robb Matzke, 1999-07-27
+ * The ADDR argument is passed by value.
+ *
+ * Quincey Koziol, 2003-03-19
+ * Added "deleted" argument
+ *
+ * Bill Wendling, 2003-09-18
+ * If this is an FPHDF5 driver and the data is dirty,
+ * perform a "flush" that writes the data to the SAP.
+ *
+ * John Mainzer 5/19/04
+ * Complete re-write for the new metadata cache.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_unprotect()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_unprotect().
+ *
+ * JRM - 1/6/05
+ * Replaced the deleted parameter with the new flags parameter.
+ * Since the deleted parameter is not used by the FPHDF5 code,
+ * the only change in the body is to replace the deleted
+ * parameter with the flags parameter in the call to
+ * H5C_unprotect().
+ *
+ * JRM - 6/6/05
+ * Added the dirtied flag and supporting code. This is
+ * part of a collection of changes directed at moving
+ * management of cache entry dirty flags into the H5C code.
+ *
+ * JRM - 7/5/05
+ * Added code to track dirty byte generation, and to trigger
+ * clean entry list propagation when it exceeds a user
+ * specified threshold. Note that this code only applies in
+ * the PHDF5 case. It should have no effect on either the
+ * serial or FPHDF5 cases.
+ *
+ * JRM - 9/8/05
+ * Added code to track entry size changes. This is necessary
+ * as it can effect dirty byte creation counts, thereby
+ * throwing the caches out of sync in the PHDF5 case.
+ *
+ * JRM - 5/16/06
+ * Added code to use the new dirtied field in
+ * H5C_cache_entry_t in the test to see if the entry has
+ * been dirtied.
+ *
+ * JRM - 6/7/06
+ * Added support for the trace file.
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ * Note that the H5AC2__SIZE_CHANGED_FLAG must now be set if
+ * the size of the entry has changed.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC2_class_t *type,
+ haddr_t addr, size_t new_size, void *thing, unsigned flags)
+{
+ herr_t result;
+ herr_t ret_value=SUCCEED; /* Return value */
+ hbool_t size_changed = FALSE;
+ hbool_t dirtied;
+#ifdef H5_HAVE_PARALLEL
+ H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+#if H5AC2__TRACE_FILE_ENABLED
+ char trace[128] = "";
+ unsigned trace_flags = 0;
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_ENTER_NOAPI(H5AC2_unprotect, FAIL)
+
+ /* sanity checks -- the TYPE and ADDR arguments must match the values
+ * recorded in the entry when it was protected.
+ */
+ HDassert(f);
+ HDassert(f->shared->cache2);
+ HDassert(type);
+ HDassert(type->deserialize);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(thing);
+ HDassert( ((H5AC2_info_t *)thing)->addr == addr );
+ HDassert( ((H5AC2_info_t *)thing)->type == type );
+
+#if H5AC2__TRACE_FILE_ENABLED
+ /* For the unprotect call, only the addr, type id, flags, and possible
+ * new size are really necessary in the trace file. Write the return
+ * value to catch occult errors.
+ */
+ if ( ( f != NULL ) &&
+ ( f->shared != NULL ) &&
+ ( f->shared->cache2 != NULL ) &&
+ ( H5C2_get_trace_file_ptr(f->shared->cache2, &trace_file_ptr) >= 0) &&
+ ( trace_file_ptr != NULL ) ) {
+
+ sprintf(trace, "H5AC2_unprotect 0x%lx %d",
+ (unsigned long)addr,
+ (int)(type->id));
+
+ trace_flags = flags;
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ /* The entry is considered dirtied if the caller passed the
+ * H5AC2__DIRTIED_FLAG, or if the entry's own dirtied field was set
+ * while the entry was protected.
+ */
+ dirtied = ( ( (flags & H5AC2__DIRTIED_FLAG) == H5AC2__DIRTIED_FLAG ) ||
+ ( ((H5AC2_info_t *)thing)->dirtied ) );
+
+ size_changed = ( (flags & H5AC2__SIZE_CHANGED_FLAG) ==
+ H5AC2__SIZE_CHANGED_FLAG );
+
+#ifdef H5_HAVE_PARALLEL
+ /* If the entry has just become dirty (it is newly dirtied and not
+ * already marked dirty) and an aux structure exists (parallel case),
+ * log the dirtied entry so dirty byte generation can be tracked.
+ */
+ if ( ( dirtied ) && ( ((H5AC2_info_t *)thing)->is_dirty == FALSE ) &&
+ ( NULL != (aux_ptr = f->shared->cache2->aux_ptr) ) ) {
+
+ result = H5AC2_log_dirtied_entry(f->shared->cache2,
+ (H5AC2_info_t *)thing,
+ addr,
+ size_changed,
+ new_size);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "H5AC2_log_dirtied_entry() failed.")
+ }
+ }
+
+ /* deleted entries are only logged on the process with MPI rank 0 */
+ if ( ( (flags & H5C2__DELETED_FLAG) != 0 ) &&
+ ( NULL != (aux_ptr = f->shared->cache2->aux_ptr) ) &&
+ ( aux_ptr->mpi_rank == 0 ) ) {
+
+ result = H5AC2_log_deleted_entry(f->shared->cache2,
+ (H5AC2_info_t *)thing,
+ addr,
+ flags);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "H5AC2_log_deleted_entry() failed.")
+ }
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ /* the real work of the unprotect is done at the H5C2 level */
+ result = H5C2_unprotect(f->shared->cache2,
+ dxpl_id,
+ type,
+ addr,
+ thing,
+ flags,
+ new_size);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "H5C2_unprotect() failed.")
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ /* If the accumulated dirty bytes have reached the user specified
+ * threshold, propagate the flushed and still clean entries list.
+ */
+ if ( ( aux_ptr != NULL ) &&
+ ( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) ) {
+
+ result = H5AC2_propagate_flushed_and_still_clean_entries_list(f,
+ H5AC2_noblock_dxpl_id,
+ f->shared->cache2,
+ TRUE);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Can't propagate clean entries list.")
+ }
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+ if ( trace_file_ptr != NULL ) {
+
+ HDfprintf(trace_file_ptr, "%s %d %x %d\n",
+ trace,
+ (int)new_size,
+ (unsigned)flags,
+ (int)ret_value);
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_unprotect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_set_write_done_callback
+ *
+ * Purpose: Set the value of the write_done callback. This callback
+ * is used to improve performance of the parallel test bed
+ * for the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/11/06
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC2_set_write_done_callback(H5C2_t * cache_ptr,
+ void (* write_done)(void))
+{
+ H5AC2_aux_t * aux = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_set_write_done_callback, FAIL)
+
+ /* Sanity check the cache pointer. This would normally be an assert,
+ * but an HGOTO_ERROR call is used instead to shut up the compiler.
+ */
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+ }
+
+ aux = cache_ptr->aux_ptr;
+
+ HDassert( aux != NULL );
+ HDassert( aux->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+ /* install (or clear) the write done callback */
+ aux->write_done = write_done;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_set_write_done_callback() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_stats
+ *
+ * Purpose: Prints statistics about the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Robb Matzke
+ * Thursday, October 30, 1997
+ *
+ * Modifications:
+ * John Mainzer 5/19/04
+ * Re-write to support the new metadata cache.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_stats()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_stats().
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC2_stats(const H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_stats, FAIL)
+
+ HDassert( f != NULL );
+ HDassert( f->shared->cache2 != NULL );
+
+ /* H5C2_stats() cannot fail at present, so ignore its return value */
+ (void)H5C2_stats(f->shared->cache2, f->name, FALSE);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_get_cache_auto_resize_config
+ *
+ * Purpose: Wrapper function for H5C2_get_cache_auto_resize_config().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 3/10/05
+ *
+ * Modifications:
+ *
+ * JRM - 4/6/05
+ * Reworked for the addition of struct H5AC2_cache_config_t.
+ *
+ * JRM - 10/25/05
+ * Added support for the new dirty_bytes_threshold field of
+ * both H5AC2_cache_config_t and H5AC2_aux_t.
+ *
+ * JRM - 6/8/06
+ * Added support for the new trace file related fields.
+ *
+ * JRM - 7/28/07
+ * Added support for the new evictions enabled related fields.
+ *
+ * Observe that H5AC2_get_cache_auto_resize_config() and
+ * H5AC2_set_cache_auto_resize_config() are becoming generic
+ * metadata cache configuration routines as they gain
+ * switches for functions that are only tenuously related
+ * to auto resize configuration.
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_get_cache_auto_resize_config(H5AC2_t * cache_ptr,
+ H5AC2_cache_config_t *config_ptr)
+{
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t evictions_enabled;
+ H5C2_auto_size_ctl_t internal_config;
+
+ FUNC_ENTER_NOAPI(H5AC2_get_cache_auto_resize_config, FAIL)
+
+ /* sanity check the cache and config pointers before touching them */
+ if ( ( cache_ptr == NULL )
+ ||
+#ifdef H5_HAVE_PARALLEL
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ ( ((H5AC2_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC2__H5AC2_AUX_T_MAGIC
+ )
+ )
+ ||
+#endif /* H5_HAVE_PARALLEL */
+ ( config_ptr == NULL )
+ ||
+ ( config_ptr->version != H5AC2__CURR_CACHE_CONFIG_VERSION )
+ )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Bad cache_ptr or config_ptr on entry.")
+
+ }
+
+ /* obtain the internal auto resize configuration from the H5C2 level */
+ result = H5C2_get_cache_auto_resize_config((H5C2_t *)cache_ptr,
+ &internal_config);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_cache_auto_resize_config() failed.")
+ }
+
+ result = H5C2_get_evictions_enabled((H5C2_t *)cache_ptr,
+ &evictions_enabled);
+
+ if ( result < 0 ) {
+
+ /* note: error message corrected -- it used to name the
+ * nonexistent H5C2_get_resize_enabled().
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_evictions_enabled() failed.")
+ }
+
+ /* the report function is enabled iff a report function is registered */
+ if ( internal_config.rpt_fcn == NULL ) {
+
+ config_ptr->rpt_fcn_enabled = FALSE;
+
+ } else {
+
+ config_ptr->rpt_fcn_enabled = TRUE;
+ }
+
+ /* translate the internal configuration into the external form */
+ config_ptr->open_trace_file = FALSE;
+ config_ptr->close_trace_file = FALSE;
+ config_ptr->trace_file_name[0] = '\0';
+ config_ptr->evictions_enabled = evictions_enabled;
+ config_ptr->set_initial_size = internal_config.set_initial_size;
+ config_ptr->initial_size = internal_config.initial_size;
+ config_ptr->min_clean_fraction = internal_config.min_clean_fraction;
+ config_ptr->max_size = internal_config.max_size;
+ config_ptr->min_size = internal_config.min_size;
+ config_ptr->epoch_length = (long)(internal_config.epoch_length);
+ config_ptr->incr_mode = internal_config.incr_mode;
+ config_ptr->lower_hr_threshold = internal_config.lower_hr_threshold;
+ config_ptr->increment = internal_config.increment;
+ config_ptr->apply_max_increment = internal_config.apply_max_increment;
+ config_ptr->max_increment = internal_config.max_increment;
+ config_ptr->decr_mode = internal_config.decr_mode;
+ config_ptr->upper_hr_threshold = internal_config.upper_hr_threshold;
+ config_ptr->decrement = internal_config.decrement;
+ config_ptr->apply_max_decrement = internal_config.apply_max_decrement;
+ config_ptr->max_decrement = internal_config.max_decrement;
+ config_ptr->epochs_before_eviction =
+ (int)(internal_config.epochs_before_eviction);
+ config_ptr->apply_empty_reserve = internal_config.apply_empty_reserve;
+ config_ptr->empty_reserve = internal_config.empty_reserve;
+
+#ifdef H5_HAVE_PARALLEL
+ /* in the parallel case, the dirty bytes threshold lives in the aux
+ * structure -- report the default when there is no aux structure.
+ */
+ if ( cache_ptr->aux_ptr != NULL ) {
+
+ config_ptr->dirty_bytes_threshold =
+ ((H5AC2_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
+
+ } else {
+#endif /* H5_HAVE_PARALLEL */
+
+ config_ptr->dirty_bytes_threshold = H5AC2__DEFAULT_DIRTY_BYTES_THRESHOLD;
+
+#ifdef H5_HAVE_PARALLEL
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_get_cache_auto_resize_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_get_cache_size
+ *
+ * Purpose: Wrapper function for H5C2_get_cache_size().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 3/11/05
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_get_cache_size(H5AC2_t * cache_ptr,
+ size_t * max_size_ptr,
+ size_t * min_clean_size_ptr,
+ size_t * cur_size_ptr,
+ int32_t * cur_num_entries_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_get_cache_size, FAIL)
+
+ /* Thin wrapper -- forward the query to the H5C2 level. */
+ if ( H5C2_get_cache_size((H5C2_t *)cache_ptr,
+ max_size_ptr,
+ min_clean_size_ptr,
+ cur_size_ptr,
+ cur_num_entries_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_cache_size() failed.")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_get_cache_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_get_cache_hit_rate
+ *
+ * Purpose: Wrapper function for H5C2_get_cache_hit_rate().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 3/10/05
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_get_cache_hit_rate(H5AC2_t * cache_ptr,
+ double * hit_rate_ptr)
+
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_get_cache_hit_rate, FAIL)
+
+ /* Thin wrapper -- forward the query to the H5C2 level. */
+ if ( H5C2_get_cache_hit_rate((H5C2_t *)cache_ptr, hit_rate_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_cache_hit_rate() failed.")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_get_cache_hit_rate() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_reset_cache_hit_rate_stats()
+ *
+ * Purpose: Wrapper function for H5C2_reset_cache_hit_rate_stats().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer, 3/10/05
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_reset_cache_hit_rate_stats(H5AC2_t * cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_reset_cache_hit_rate_stats, FAIL)
+
+ /* Thin wrapper -- forward the reset request to the H5C2 level. */
+ if ( H5C2_reset_cache_hit_rate_stats((H5C2_t *)cache_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_reset_cache_hit_rate_stats() failed.")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_reset_cache_hit_rate_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_set_cache_auto_resize_config
+ *
+ * Purpose: Wrapper function for H5C2_set_cache_auto_resize_config().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 3/10/05
+ *
+ * Modifications:
+ *
+ * John Mainzer -- 4/6/05
+ * Updated for the addition of H5AC2_cache_config_t.
+ *
+ * John Mainzer -- 10/25/05
+ * Added support for the new dirty_bytes_threshold field of
+ * both H5AC2_cache_config_t and H5AC2_aux_t.
+ *
+ * John Mainzer -- 6/7/06
+ * Added trace file support.
+ *
+ * John Mainzer -- 7/28/07
+ * Added support for the new evictions enabled related fields.
+ *
+ * Observe that H5AC2_get_cache_auto_resize_config() and
+ * H5AC2_set_cache_auto_resize_config() are becoming generic
+ * metadata cache configuration routines as they gain
+ * switches for functions that are only tenuously related
+ * to auto resize configuration.
+ *
+ * JRM - 10/18/07
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_set_cache_auto_resize_config(H5AC2_t * cache_ptr,
+ H5AC2_cache_config_t *config_ptr)
+{
+ /* const char * fcn_name = "H5AC2_set_cache_auto_resize_config"; */
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5C2_auto_size_ctl_t internal_config;
+#if H5AC2__TRACE_FILE_ENABLED
+ H5AC2_cache_config_t trace_config = H5AC2__DEFAULT_CACHE_CONFIG;
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_ENTER_NOAPI(H5AC2_set_cache_auto_resize_config, FAIL)
+
+#if H5AC2__TRACE_FILE_ENABLED
+ /* Make note of the new configuration. Don't look up the trace file
+ * pointer, as that may change before we use it.
+ */
+ if ( config_ptr != NULL ) {
+
+ trace_config = *config_ptr;
+
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ /* sanity check the cache pointer (and, in parallel, the aux magic) */
+ if ( ( cache_ptr == NULL )
+#ifdef H5_HAVE_PARALLEL
+ ||
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ (
+ ((H5AC2_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC2__H5AC2_AUX_T_MAGIC
+ )
+ )
+#endif /* H5_HAVE_PARALLEL */
+ ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry.")
+ }
+
+ /* validate the new configuration before applying any part of it */
+ result = H5AC2_validate_config(config_ptr);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration");
+ }
+
+ /* opening a trace file while one is already open (and not being
+ * closed by this call) is an error.
+ */
+ if ( config_ptr->open_trace_file ) {
+
+ FILE * file_ptr = NULL;
+
+ if ( H5C2_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_trace_file_ptr() failed.")
+ }
+
+ if ( ( ! ( config_ptr->close_trace_file ) ) &&
+ ( file_ptr != NULL ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "Trace file already open.")
+ }
+ }
+
+ /* verify that the dirty bytes threshold is in range */
+ if (
+ (
+ config_ptr->dirty_bytes_threshold
+ <
+ H5AC2__MIN_DIRTY_BYTES_THRESHOLD
+ )
+ ||
+ (
+ config_ptr->dirty_bytes_threshold
+ >
+ H5AC2__MAX_DIRTY_BYTES_THRESHOLD
+ )
+ ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "config_ptr->dirty_bytes_threshold out of range.")
+ }
+
+ /* close the current trace file if directed to do so */
+ if ( config_ptr->close_trace_file ) {
+
+ if ( H5AC2_close_trace_file(cache_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC2_close_trace_file() failed.")
+ }
+ }
+
+ /* and open a new trace file if directed to do so */
+ if ( config_ptr->open_trace_file ) {
+
+ if ( H5AC2_open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0 )
+ {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "H5AC2_open_trace_file() failed.")
+ }
+ }
+
+ /* translate the external configuration into the internal
+ * representation used by the H5C2 code.
+ */
+ if ( H5AC2_ext_config_2_int_config(config_ptr, &internal_config) !=
+ SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC2_ext_config_2_int_config() failed.")
+ }
+
+ result = H5C2_set_cache_auto_resize_config((H5C2_t *)cache_ptr,
+ &internal_config);
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_set_cache_auto_resize_config() failed.")
+ }
+
+
+ result = H5C2_set_evictions_enabled((H5C2_t *)cache_ptr,
+ config_ptr->evictions_enabled);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_set_evictions_enabled() failed.")
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ /* in the parallel case, the dirty bytes threshold is stored in the
+ * aux structure.
+ */
+ if ( cache_ptr->aux_ptr != NULL ) {
+
+ ((H5AC2_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
+ config_ptr->dirty_bytes_threshold;
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+#if H5AC2__TRACE_FILE_ENABLED
+ /* For the set cache auto resize config call, only the contents
+ * of the config is necessary in the trace file. Write the return
+ * value to catch occult errors.
+ */
+ if ( ( cache_ptr != NULL ) &&
+ ( H5C2_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
+ ( trace_file_ptr != NULL ) ) {
+
+ HDfprintf(trace_file_ptr,
+ "%s %d %d %d %d \"%s\" %d %d %d %f %d %d %ld %d %f %f %d %d %d %f %f %d %d %d %d %f %d %d\n",
+ "H5AC2_set_cache_auto_resize_config",
+ trace_config.version,
+ (int)(trace_config.rpt_fcn_enabled),
+ (int)(trace_config.open_trace_file),
+ (int)(trace_config.close_trace_file),
+ trace_config.trace_file_name,
+ (int)(trace_config.evictions_enabled),
+ (int)(trace_config.set_initial_size),
+ (int)(trace_config.initial_size),
+ trace_config.min_clean_fraction,
+ (int)(trace_config.max_size),
+ (int)(trace_config.min_size),
+ trace_config.epoch_length,
+ (int)(trace_config.incr_mode),
+ trace_config.lower_hr_threshold,
+ trace_config.increment,
+ (int)(trace_config.apply_max_increment),
+ (int)(trace_config.max_increment),
+ (int)(trace_config.decr_mode),
+ trace_config.upper_hr_threshold,
+ trace_config.decrement,
+ (int)(trace_config.apply_max_decrement),
+ (int)(trace_config.max_decrement),
+ trace_config.epochs_before_eviction,
+ (int)(trace_config.apply_empty_reserve),
+ trace_config.empty_reserve,
+ trace_config.dirty_bytes_threshold,
+ (int)ret_value);
+ }
+#endif /* H5AC2__TRACE_FILE_ENABLED */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_set_cache_auto_resize_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_validate_config()
+ *
+ * Purpose: Run a sanity check on the contents of the supplied
+ * instance of H5AC2_cache_config_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * At present, this function operates by packing the data
+ * from the instance of H5AC2_cache_config_t into an instance
+ * of H5C2_auto_size_ctl_t, and then calling
+ * H5C2_validate_resize_config(). As H5AC2_cache_config_t and
+ * H5C2_auto_size_ctl_t diverge, we may have to change this.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 4/6/05
+ *
+ * Modifications:
+ *
+ * - Added code testing the trace file configuration fields.
+ * These tests are not comprehensive, as many errors cannot
+ * be caught until the directives contained in these fields
+ * are applied.
+ * JRM - 5/15/06
+ *
+ * - Added code testing the evictions enabled field. At
+ * present this consists of verifying that if
+ * evictions_enabled is FALSE, then automatic cache
+ * resizing in disabled.
+ *
+ * JRM - 7/28/07
+ *
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ * JRM - 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_validate_config(H5AC2_cache_config_t * config_ptr)
+
+{
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ size_t name_len; /* size_t, as that is what HDstrlen() returns */
+ H5C2_auto_size_ctl_t internal_config;
+
+ FUNC_ENTER_NOAPI(H5AC2_validate_config, FAIL)
+
+ if ( config_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
+ }
+
+ if ( config_ptr->version != H5AC2__CURR_CACHE_CONFIG_VERSION ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.")
+ }
+
+ /* validate the boolean fields */
+ if ( ( config_ptr->rpt_fcn_enabled != TRUE ) &&
+ ( config_ptr->rpt_fcn_enabled != FALSE ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
+ }
+
+ if ( ( config_ptr->open_trace_file != TRUE ) &&
+ ( config_ptr->open_trace_file != FALSE ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->open_trace_file must be either TRUE or FALSE.")
+ }
+
+ if ( ( config_ptr->close_trace_file != TRUE ) &&
+ ( config_ptr->close_trace_file != FALSE ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->close_trace_file must be either TRUE or FALSE.")
+ }
+
+ /* don't bother to test trace_file_name unless open_trace_file is TRUE */
+ if ( config_ptr->open_trace_file ) {
+
+ /* Can't really test the trace_file_name field without trying to
+ * open the file, so we will content ourselves with a couple of
+ * sanity checks on the length of the file name.
+ */
+ name_len = HDstrlen(config_ptr->trace_file_name);
+
+ if ( name_len == 0 ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->trace_file_name is empty.")
+
+ } else if ( name_len > H5AC2__MAX_TRACE_FILE_NAME_LEN ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->trace_file_name too long.")
+ }
+ }
+
+ if ( ( config_ptr->evictions_enabled != TRUE ) &&
+ ( config_ptr->evictions_enabled != FALSE ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "config_ptr->evictions_enabled must be either TRUE or FALSE.")
+ }
+
+ /* If evictions are disabled, automatic cache resizing must be off in
+ * both directions. Note: the original version of this test compared
+ * incr_mode against both H5C2_incr__off and H5C2_decr__off, which
+ * made the disjunction always true -- the second operand must test
+ * decr_mode.
+ */
+ if ( ( config_ptr->evictions_enabled == FALSE ) &&
+ ( ( config_ptr->incr_mode != H5C2_incr__off ) ||
+ ( config_ptr->decr_mode != H5C2_decr__off ) ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "Can't disable evictions while auto-resize is enabled.")
+ }
+
+ /* verify that the dirty bytes threshold is in range */
+ if ( config_ptr->dirty_bytes_threshold < H5AC2__MIN_DIRTY_BYTES_THRESHOLD ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "dirty_bytes_threshold too small.")
+ } else
+ if ( config_ptr->dirty_bytes_threshold > H5AC2__MAX_DIRTY_BYTES_THRESHOLD ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "dirty_bytes_threshold too big.")
+ }
+
+ /* convert to the internal representation, and let the H5C2 code
+ * validate the auto resize fields.
+ */
+ if ( H5AC2_ext_config_2_int_config(config_ptr, &internal_config) !=
+ SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC2_ext_config_2_int_config() failed.")
+ }
+
+ result = H5C2_validate_resize_config(&internal_config,
+ H5C2_RESIZE_CFG__VALIDATE_ALL);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "error(s) in new config.")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_validate_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_close_trace_file()
+ *
+ * Purpose: If a trace file is open, stop logging calls to the cache,
+ * and close the file.
+ *
+ * Note that the function does nothing if there is no trace
+ * file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/06
+ *
+ * Modifications:
+ *
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ * JRM - 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_close_trace_file(H5AC2_t * cache_ptr)
+
+{
+ FILE * trace_file_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC2_close_trace_file, FAIL)
+
+ if ( cache_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL cache_ptr on entry.")
+ }
+
+ /* look up the current trace file pointer */
+ if ( H5C2_get_trace_file_ptr(cache_ptr, &trace_file_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_trace_file_ptr() failed.")
+ }
+
+ /* If a trace file is open, detach it from the cache first, and then
+ * close it. If none is open, there is nothing to do.
+ */
+ if ( trace_file_ptr != NULL ) {
+
+ if ( H5C2_set_trace_file_ptr(cache_ptr, NULL) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_set_trace_file_ptr() failed.")
+ }
+
+ if ( HDfclose(trace_file_ptr) != 0 ) {
+
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, \
+ "can't close metadata cache trace file")
+ }
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_close_trace_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_open_trace_file()
+ *
+ * Purpose: Open a trace file, and start logging calls to the cache.
+ *
+ * This logging is done at the H5C2 level, and will only take
+ * place if H5C2_TRACE_FILE_ENABLED (defined in H5C2private.h)
+ * is TRUE.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/1/06
+ *
+ * Modifications:
+ *
+ * Modified code in support of revised cache API needed
+ * to permit journaling.
+ * JRM - 10/18/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_open_trace_file(H5AC2_t * cache_ptr,
+ const char * trace_file_name)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ char file_name[H5AC2__MAX_TRACE_FILE_NAME_LEN + H5C2__PREFIX_LEN + 2];
+ FILE * file_ptr = NULL;
+ int n; /* # of chars HDsnprintf() wanted to write */
+#ifdef H5_HAVE_PARALLEL
+ H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ FUNC_ENTER_NOAPI(H5AC2_open_trace_file, FAIL)
+
+ if ( cache_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr NULL on entry.")
+ }
+
+ if ( trace_file_name == NULL ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "NULL trace_file_name on entry.")
+ }
+
+ if ( HDstrlen(trace_file_name) > H5AC2__MAX_TRACE_FILE_NAME_LEN ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "trace file name too long.")
+ }
+
+ if ( H5C2_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_get_trace_file_ptr() failed.")
+ }
+
+ if ( file_ptr != NULL ) {
+
+ HGOTO_ERROR(H5E_FILE, H5E_FILEOPEN, FAIL, "trace file already open.")
+ }
+
+ /* Construct the (possibly MPI rank qualified) trace file name. Use
+ * HDsnprintf() rather than sprintf() so that an over-long cooked name
+ * is reported as truncation instead of overflowing file_name -- the
+ * original code checked the length only after the unbounded write.
+ */
+#ifdef H5_HAVE_PARALLEL
+
+ aux_ptr = (H5AC2_aux_t *)(cache_ptr->aux_ptr);
+
+ if ( aux_ptr == NULL ) {
+
+ n = HDsnprintf(file_name, sizeof(file_name), "%s", trace_file_name);
+
+ } else {
+
+ if ( aux_ptr->magic != H5AC2__H5AC2_AUX_T_MAGIC ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr->magic.")
+ }
+
+ /* in the parallel case, each process gets its own trace file,
+ * qualified by its MPI rank.
+ */
+ n = HDsnprintf(file_name, sizeof(file_name), "%s.%d",
+ trace_file_name, aux_ptr->mpi_rank);
+ }
+
+#else /* H5_HAVE_PARALLEL */
+
+ n = HDsnprintf(file_name, sizeof(file_name), "%s", trace_file_name);
+
+#endif /* H5_HAVE_PARALLEL */
+
+ if ( ( n < 0 ) || ( (size_t)n >= sizeof(file_name) ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "cooked trace file name too long.")
+ }
+
+ if ( (file_ptr = HDfopen(file_name, "w")) == NULL ) {
+
+ /* trace file open failed */
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "trace file open failed.")
+ }
+
+ HDfprintf(file_ptr, "### HDF5 metadata cache trace file version 1 ###\n");
+
+ if ( H5C2_set_trace_file_ptr(cache_ptr, file_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_set_trace_file_ptr() failed.")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_open_trace_file() */
+
+
+/*************************************************************************/
+/**************************** Private Functions: *************************/
+/*************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_broadcast_clean_list()
+ *
+ * Purpose: Broadcast the contents of the process 0 cleaned entry
+ * slist. In passing, also remove all entries from said
+ * list, and also remove any matching entries from the dirtied
+ * slist.
+ *
+ * This function must only be called by the process with
+ * MPI_rank 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/1/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_broadcast_clean_list(H5AC2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    haddr_t addr;
+    H5AC2_aux_t * aux_ptr = NULL;
+    H5SL_node_t * slist_node_ptr = NULL;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+    MPI_Offset * buf_ptr = NULL;
+    size_t buf_size;
+    int i = 0;
+    int mpi_result;
+    int num_entries;
+
+    FUNC_ENTER_NOAPI(H5AC2_broadcast_clean_list, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = (H5AC2_aux_t *)(cache_ptr->aux_ptr);
+
+    /* only the process with MPI rank 0 may broadcast the clean list */
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+    HDassert( aux_ptr->mpi_rank == 0 );
+    HDassert( aux_ptr->c_slist_ptr != NULL );
+    HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
+              (size_t)(aux_ptr->c_slist_len) );
+
+
+    /* First broadcast the number of entries in the list so that the
+     * receives can set up a buffer to receive them.  If there aren't
+     * any, we are done.
+     */
+    num_entries = aux_ptr->c_slist_len;
+
+    mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm);
+
+    if ( mpi_result != MPI_SUCCESS ) {
+
+        HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+
+    }
+
+    if ( num_entries > 0 )
+    {
+        /* allocate a buffer to store the list of entry base addresses in */
+
+        buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
+
+        buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size);
+
+        if ( buf_ptr == NULL ) {
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                        "memory allocation failed for clean entry buffer")
+        }
+
+        /* now load the entry base addresses into the buffer, emptying the
+         * cleaned entry list in passing
+         */
+
+        while ( NULL != (slist_node_ptr = H5SL_first(aux_ptr->c_slist_ptr) ) )
+        {
+            slist_entry_ptr = H5SL_item(slist_node_ptr);
+
+            HDassert(slist_entry_ptr->magic == H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC);
+
+            HDassert( i < num_entries );
+
+            /* copy the address now -- the slist entry is freed below,
+             * and addr is also the removal key for both slists.
+             */
+            addr = slist_entry_ptr->addr;
+
+            if ( H5FD_mpi_haddr_to_MPIOff(addr, &(buf_ptr[i])) < 0 ) {
+
+                HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
+                            "can't convert from haddr to MPI off")
+            }
+
+            i++;
+
+            /* now remove the entry from the cleaned entry list */
+            if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from cleaned entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->c_slist_len -= 1;
+
+            HDassert( aux_ptr->c_slist_len >= 0 );
+
+            /* and also remove the matching entry from the dirtied list
+             * if it exists.
+             */
+            if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+                                                (void *)(&addr))) != NULL ) {
+
+                HDassert( slist_entry_ptr->magic ==
+                          H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+                HDassert( slist_entry_ptr->addr == addr );
+
+                if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+                     != slist_entry_ptr ) {
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                                "Can't delete entry from dirty entry slist.")
+                }
+
+                slist_entry_ptr->magic = 0;
+                H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+                slist_entry_ptr = NULL;
+
+                aux_ptr->d_slist_len -= 1;
+
+                HDassert( aux_ptr->d_slist_len >= 0 );
+            }
+
+        } /* while */
+
+
+        /* Now broadcast the list of cleaned entries -- if there is one.
+         *
+         * The peculiar structure of the following call to MPI_Bcast is
+         * due to MPI's (?) failure to believe in the MPI_Offset type.
+         * Thus the element type is MPI_BYTE, with size equal to the
+         * buf_size computed above.
+         */
+
+        mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
+                               aux_ptr->mpi_comm);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+        }
+    }
+
+done:
+
+    /* discard the broadcast buffer, if it was allocated */
+    if ( buf_ptr != NULL ) {
+
+        buf_ptr = (MPI_Offset *)H5MM_xfree((void *)buf_ptr);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_broadcast_clean_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_check_if_write_permitted
+ *
+ * Purpose: Determine if a write is permitted under the current
+ * circumstances, and set *write_permitted_ptr accordingly.
+ * As a general rule it is, but when we are running in parallel
+ * mode with collective I/O, we must ensure that a read cannot
+ * cause a write.
+ *
+ * In the event of failure, the value of *write_permitted_ptr
+ * is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/15/04
+ *
+ * Modifications:
+ *
+ * John Mainzer, 9/23/05
+ * Rewrote function to return the value of the
+ * write_permitted field in aux structure if the structure
+ * exists and mpi_rank is 0.
+ *
+ * If the aux structure exists, but mpi_rank isn't 0, the
+ * function now returns FALSE.
+ *
+ * In all other cases, the function returns TRUE.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_check_if_write_permitted(const H5F_t *f,
+                               hid_t UNUSED dxpl_id,
+                               hbool_t * write_permitted_ptr)
+#else /* H5_HAVE_PARALLEL */
+static herr_t
+H5AC2_check_if_write_permitted(const H5F_t UNUSED * f,
+                               hid_t UNUSED dxpl_id,
+                               hbool_t * write_permitted_ptr)
+#endif /* H5_HAVE_PARALLEL */
+{
+    hbool_t permitted = TRUE;   /* serial case: writes are always allowed */
+    herr_t ret_value = SUCCEED; /* Return value */
+#ifdef H5_HAVE_PARALLEL
+    H5AC2_aux_t * aux_ptr = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+    FUNC_ENTER_NOAPI(H5AC2_check_if_write_permitted, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+    HDassert( f != NULL );
+    HDassert( f->shared != NULL );
+    HDassert( f->shared->cache2 != NULL );
+
+    aux_ptr = (H5AC2_aux_t *)(f->shared->cache2->aux_ptr);
+
+    /* If no aux structure exists, we are not running in PHDF5 mode,
+     * and the serial default (TRUE) stands.
+     */
+    if ( aux_ptr != NULL ) {
+
+        HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+        if ( aux_ptr->mpi_rank != 0 ) {
+
+            /* only the rank 0 process may ever write to file */
+            permitted = FALSE;
+
+        } else {
+
+            /* rank 0 writes only when the aux structure says it may */
+            permitted = aux_ptr->write_permitted;
+        }
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    *write_permitted_ptr = permitted;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_check_if_write_permitted() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_ext_config_2_int_config()
+ *
+ * Purpose: Utility function to translate an instance of
+ * H5AC2_cache_config_t to an instance of H5C2_auto_size_ctl_t.
+ *
+ * Places translation in *int_conf_ptr and returns SUCCEED
+ * if successful. Returns FAIL on failure.
+ *
+ * Does only minimal sanity checking.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 1/26/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5AC2_ext_config_2_int_config(H5AC2_cache_config_t * ext_conf_ptr,
+                              H5C2_auto_size_ctl_t * int_conf_ptr)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(H5AC2_ext_config_2_int_config, FAIL)
+
+    /* Reject NULL pointers and unknown external config versions before
+     * touching either structure.  The original code performed these tests
+     * but left the error branch empty, so a NULL ext_conf_ptr or
+     * int_conf_ptr would be dereferenced below.
+     */
+    if ( ( ext_conf_ptr == NULL ) ||
+         ( ext_conf_ptr->version != H5AC2__CURR_CACHE_CONFIG_VERSION ) ||
+         ( int_conf_ptr == NULL ) ) {
+
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                    "Bad ext_conf_ptr or inconsistent data on entry.")
+    }
+
+    int_conf_ptr->version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+
+    /* a NULL rpt_fcn disables auto-resize reporting */
+    if ( ext_conf_ptr->rpt_fcn_enabled ) {
+
+        int_conf_ptr->rpt_fcn = H5C2_def_auto_resize_rpt_fcn;
+
+    } else {
+
+        int_conf_ptr->rpt_fcn = NULL;
+    }
+
+    /* general size configuration */
+    int_conf_ptr->set_initial_size = ext_conf_ptr->set_initial_size;
+    int_conf_ptr->initial_size = ext_conf_ptr->initial_size;
+    int_conf_ptr->min_clean_fraction = ext_conf_ptr->min_clean_fraction;
+    int_conf_ptr->max_size = ext_conf_ptr->max_size;
+    int_conf_ptr->min_size = ext_conf_ptr->min_size;
+    int_conf_ptr->epoch_length =
+                                (int64_t)(ext_conf_ptr->epoch_length);
+
+    /* size increase control fields */
+    int_conf_ptr->incr_mode = ext_conf_ptr->incr_mode;
+    int_conf_ptr->lower_hr_threshold = ext_conf_ptr->lower_hr_threshold;
+    int_conf_ptr->increment = ext_conf_ptr->increment;
+    int_conf_ptr->apply_max_increment = ext_conf_ptr->apply_max_increment;
+    int_conf_ptr->max_increment = ext_conf_ptr->max_increment;
+
+    /* size decrease control fields */
+    int_conf_ptr->decr_mode = ext_conf_ptr->decr_mode;
+    int_conf_ptr->upper_hr_threshold = ext_conf_ptr->upper_hr_threshold;
+    int_conf_ptr->decrement = ext_conf_ptr->decrement;
+    int_conf_ptr->apply_max_decrement = ext_conf_ptr->apply_max_decrement;
+    int_conf_ptr->max_decrement = ext_conf_ptr->max_decrement;
+    int_conf_ptr->epochs_before_eviction =
+                                (int32_t)(ext_conf_ptr->epochs_before_eviction);
+    int_conf_ptr->apply_empty_reserve = ext_conf_ptr->apply_empty_reserve;
+    int_conf_ptr->empty_reserve = ext_conf_ptr->empty_reserve;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_ext_config_2_int_config() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_log_deleted_entry()
+ *
+ * Purpose: Log an entry for which H5C2__DELETED_FLAG has been set.
+ *
+ * If mpi_rank is 0, we must make sure that the entry doesn't
+ * appear in the cleaned or dirty entry lists. Otherwise,
+ * we have nothing to do.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 6/29/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_log_deleted_entry(H5AC2_t * cache_ptr,
+                        H5AC2_info_t * entry_ptr,
+                        haddr_t addr,
+                        unsigned int flags)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    H5AC2_aux_t * aux_ptr = NULL;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_log_deleted_entry, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+    HDassert( entry_ptr != NULL );
+    HDassert( entry_ptr->addr == addr );
+
+    /* this function must only be called for deleted entries */
+    HDassert( (flags & H5C2__DELETED_FLAG) != 0 );
+
+    /* only rank 0 maintains the cleaned / dirtied entry slists; all
+     * other ranks have nothing to do for a deleted entry.
+     */
+    if ( aux_ptr->mpi_rank == 0 ) {
+
+        HDassert( aux_ptr->d_slist_ptr != NULL );
+        HDassert( aux_ptr->c_slist_ptr != NULL );
+
+        /* if the entry appears in the dirtied entry slist, remove it. */
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+                                            (void *)(&addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic ==
+                      H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+            HDassert( slist_entry_ptr->addr == addr );
+
+            if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from dirty entry slist.")
+            }
+
+            /* clear the magic before freeing to catch stale references */
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->d_slist_len -= 1;
+
+            HDassert( aux_ptr->d_slist_len >= 0 );
+        }
+
+        /* if the entry appears in the cleaned entry slist, remove it. */
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+                                            (void *)(&addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic ==
+                      H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+            HDassert( slist_entry_ptr->addr == addr );
+
+            if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from cleaned entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->c_slist_len -= 1;
+
+            HDassert( aux_ptr->c_slist_len >= 0 );
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_deleted_entry() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_log_dirtied_entry()
+ *
+ * Purpose: Update the dirty_bytes count for a newly dirtied entry.
+ *
+ * If mpi_rank isn't 0, this simply means adding the size
+ * of the entries to the dirty_bytes count.
+ *
+ * If mpi_rank is 0, we must first check to see if the entry
+ * appears in the dirty entries slist. If it is, do nothing.
+ * If it isn't, add the size to the dirty_bytes count, add the
+ * entry to the dirty entries slist, and remove it from the
+ * cleaned list (if it is present there).
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 6/29/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_log_dirtied_entry(H5AC2_t * cache_ptr,
+                        H5AC2_info_t * entry_ptr,
+                        haddr_t addr,
+                        hbool_t size_changed,
+                        size_t new_size)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    size_t entry_size;
+    H5AC2_aux_t * aux_ptr = NULL;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_log_dirtied_entry, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+    /* the entry must not have been marked dirty yet */
+    HDassert( entry_ptr != NULL );
+    HDassert( entry_ptr->addr == addr );
+    HDassert( entry_ptr->is_dirty == FALSE );
+
+    /* use the new size if the entry was resized in this operation */
+    if ( size_changed ) {
+
+        entry_size = new_size;
+
+    } else {
+
+        entry_size = entry_ptr->size;
+    }
+
+    if ( aux_ptr->mpi_rank == 0 ) {
+
+        HDassert( aux_ptr->d_slist_ptr != NULL );
+        HDassert( aux_ptr->c_slist_ptr != NULL );
+
+        if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) == NULL ) {
+
+            /* insert the address of the entry in the dirty entry list, and
+             * add its size to the dirty_bytes count.
+             */
+            if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC2_slist_entry_t)) ) {
+
+                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                            "Can't allocate dirty slist entry .")
+            }
+
+            slist_entry_ptr->magic = H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC;
+            slist_entry_ptr->addr  = addr;
+
+            if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
+                             &(slist_entry_ptr->addr)) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
+                            "can't insert entry into dirty entry slist.")
+            }
+
+            aux_ptr->d_slist_len += 1;
+            aux_ptr->dirty_bytes += entry_size;
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+            aux_ptr->unprotect_dirty_bytes += entry_size;
+            aux_ptr->unprotect_dirty_bytes_updates += 1;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+        }
+
+        /* the entry is dirty.  If it exists on the cleaned entries list,
+         * remove it.  A single search suffices here -- the original code
+         * searched the clean slist twice for the same address.
+         */
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+                                            (void *)(&addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic ==
+                          H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+            HDassert( slist_entry_ptr->addr == addr );
+
+            if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from clean entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->c_slist_len -= 1;
+
+            HDassert( aux_ptr->c_slist_len >= 0 );
+        }
+    } else {
+
+        /* ranks other than 0 only track the dirty byte count */
+        aux_ptr->dirty_bytes += entry_size;
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+        aux_ptr->unprotect_dirty_bytes += entry_size;
+        aux_ptr->unprotect_dirty_bytes_updates += 1;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_dirtied_entry() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_log_flushed_entry()
+ *
+ * Purpose: Update the clean entry slist for the flush of an entry --
+ * specifically, if the entry has been cleared, remove it
+ * from both the cleaned and dirtied lists if it is present.
+ * Otherwise, if the entry was dirty, insert the indicated
+ * entry address in the clean slist if it isn't there already.
+ *
+ * This function is only used in PHDF5, and should only
+ * be called for the process with mpi rank 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 6/29/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+#if 0 /* This is useful debugging code. -- JRM */
+static herr_t
+H5AC2_log_flushed_entry_dummy(H5C2_t * cache_ptr,
+                              haddr_t addr,
+                              hbool_t was_dirty,
+                              unsigned flags,
+                              int type_id)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    H5AC2_aux_t * aux_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_log_flushed_entry_dummy, FAIL)
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    if ( ( was_dirty ) && ( (flags & H5C2__FLUSH_CLEAR_ONLY_FLAG) == 0 ) ) {
+
+        HDfprintf(stdout,
+                  "%d:H5AC2_log_flushed_entry(): addr = %d, flags = %x, was_dirty = %d, type_id = %d\n",
+                  (int)(aux_ptr->mpi_rank), (int)addr, flags, (int)was_dirty, type_id);
+    }
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_flushed_entry_dummy() */
+#endif /* JRM */
+
+static herr_t
+H5AC2_log_flushed_entry(H5C2_t * cache_ptr,
+                        haddr_t addr,
+                        hbool_t was_dirty,
+                        unsigned flags,
+                        int UNUSED type_id)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    hbool_t cleared;
+    H5AC2_aux_t * aux_ptr;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+
+
+    FUNC_ENTER_NOAPI(H5AC2_log_flushed_entry, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    /* this callback runs on the rank 0 process only */
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+    HDassert( aux_ptr->mpi_rank == 0 );
+    HDassert( aux_ptr->c_slist_ptr != NULL );
+
+    /* NOTE(review): the cleared branch below also dereferences
+     * aux_ptr->d_slist_ptr -- presumably non-NULL here, but no
+     * assert guards it.  Confirm against the aux setup code.
+     */
+    cleared = ( (flags & H5C2__FLUSH_CLEAR_ONLY_FLAG) != 0 );
+
+    if ( cleared ) {
+
+        /* If the entry has been cleared, must remove it from both the
+         * cleaned list and the dirtied list.
+         */
+
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+                                            (void *)(&addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic == H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC);
+            HDassert( slist_entry_ptr->addr == addr );
+
+            if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from clean entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->c_slist_len -= 1;
+
+            HDassert( aux_ptr->c_slist_len >= 0 );
+        }
+
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+                                            (void *)(&addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic == H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC);
+            HDassert( slist_entry_ptr->addr == addr );
+
+            if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from dirty entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->d_slist_len -= 1;
+
+            HDassert( aux_ptr->d_slist_len >= 0 );
+        }
+    } else if ( was_dirty ) {
+
+        if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) == NULL ) {
+
+            /* insert the address of the entry in the clean entry list. */
+
+            if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC2_slist_entry_t)) ) {
+
+                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                            "Can't allocate clean slist entry .")
+            }
+
+            slist_entry_ptr->magic = H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC;
+            slist_entry_ptr->addr  = addr;
+
+            if ( H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr,
+                             &(slist_entry_ptr->addr)) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
+                            "can't insert entry into clean entry slist.")
+            }
+
+            aux_ptr->c_slist_len += 1;
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_flushed_entry() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_log_inserted_entry()
+ *
+ * Purpose: Update the dirty_bytes count for a newly inserted entry.
+ *
+ * If mpi_rank isn't 0, this simply means adding the size
+ * of the entry to the dirty_bytes count.
+ *
+ * If mpi_rank is 0, we must also add the entry to the
+ * dirty entries slist.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 6/30/05
+ *
+ * Modifications:
+ *
+ * JRM -- 10/24/07
+ * Added the size parameter.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_log_inserted_entry(H5F_t * f,
+                         H5AC2_t * cache_ptr,
+                         H5AC2_info_t * entry_ptr,
+                         const H5AC2_class_t * type,
+                         haddr_t addr,
+                         size_t size)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    H5AC2_aux_t * aux_ptr = NULL;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_log_inserted_entry, FAIL)
+
+    /* NOTE(review): the f parameter is unused in this body -- presumably
+     * retained for call-site symmetry with the other log functions;
+     * confirm, or mark it UNUSED.
+     */
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+    HDassert( entry_ptr != NULL );
+    HDassert( entry_ptr->addr == addr );
+    HDassert( entry_ptr->type == type );
+
+    /* only rank 0 maintains the dirty / clean entry slists */
+    if ( aux_ptr->mpi_rank == 0 ) {
+
+        HDassert( aux_ptr->d_slist_ptr != NULL );
+        HDassert( aux_ptr->c_slist_ptr != NULL );
+
+        if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) == NULL ) {
+
+            /* insert the address of the entry in the dirty entry list, and
+             * add its size to the dirty_bytes count.
+             */
+            if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC2_slist_entry_t)) ) {
+
+                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                            "Can't allocate dirty slist entry .")
+            }
+
+            slist_entry_ptr->magic = H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC;
+            slist_entry_ptr->addr  = addr;
+
+            if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
+                             &(slist_entry_ptr->addr)) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
+                            "can't insert entry into dirty entry slist.")
+            }
+
+            aux_ptr->d_slist_len += 1;
+
+        } else {
+
+            /* a newly inserted entry cannot already be dirty */
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Inserted entry already in dirty slist.")
+        }
+
+        if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Inserted entry in clean slist.")
+        }
+    }
+
+    /* all ranks track dirty byte creation, not just rank 0 */
+    aux_ptr->dirty_bytes += size;
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+    aux_ptr->insert_dirty_bytes += size;
+    aux_ptr->insert_dirty_bytes_updates += 1;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_inserted_entry() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_log_renamed_entry()
+ *
+ * Purpose: Update the dirty_bytes count for a renamed entry.
+ *
+ * WARNING
+ *
+ * At present, the way that the rename call is used ensures
+ * that the renamed entry is present in all caches by
+ * renaming in a collective operation and immediately after
+ * unprotecting the target entry.
+ *
+ * This function uses this invariant, and will cause arcane
+ * failures if it is not met. If maintaining this invariant
+ * becomes impossible, we will have to rework this function
+ * extensively, and likely include a bit of IPC for
+ * synchronization. A better option might be to subsume
+ * rename in the unprotect operation.
+ *
+ * Given that the target entry is in all caches, the function
+ * proceeds as follows:
+ *
+ * For processes with mpi rank other 0, it simply checks to
+ * see if the entry was dirty prior to the rename, and adds
+ * the entries size to the dirty bytes count.
+ *
+ * In the process with mpi rank 0, the function first checks
+ * to see if the entry was dirty prior to the rename. If it
+ * was, and if the entry doesn't appear in the dirtied list
+ * under its old address, it adds the entry's size to the
+ * dirty bytes count.
+ *
+ * The rank 0 process then removes any references to the
+ * entry under its old address from the cleaned and dirtied
+ * lists, and inserts an entry in the dirtied list under the
+ * new address.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 6/30/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_log_renamed_entry(H5AC2_t * cache_ptr,
+                        haddr_t old_addr,
+                        haddr_t new_addr)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    hbool_t entry_in_cache;
+    hbool_t entry_dirty;
+    size_t entry_size;
+    H5AC2_aux_t * aux_ptr = NULL;
+    H5AC2_slist_entry_t * slist_entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_log_renamed_entry, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = cache_ptr->aux_ptr;
+
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+    /* get entry status, size, etc here */
+    if ( H5C2_get_entry_status(cache_ptr, old_addr, &entry_size, &entry_in_cache,
+                               &entry_dirty, NULL, NULL) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.")
+
+    } else if ( ! entry_in_cache ) {
+
+        /* per the header comment, the renamed entry must be present in
+         * all caches -- a miss here indicates a broken invariant.
+         */
+        HDassert( entry_in_cache );
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry not in cache.")
+    }
+
+    if ( aux_ptr->mpi_rank == 0 ) {
+
+        HDassert( aux_ptr->d_slist_ptr != NULL );
+        HDassert( aux_ptr->c_slist_ptr != NULL );
+
+        /* if the entry appears in the cleaned entry slist, under its old
+         * address, remove it.
+         */
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+                                            (void *)(&old_addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic ==
+                      H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+            HDassert( slist_entry_ptr->addr == old_addr );
+
+            if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&old_addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from cleaned entry slist.")
+            }
+
+            slist_entry_ptr->magic = 0;
+            H5FL_FREE(H5AC2_slist_entry_t, slist_entry_ptr);
+            slist_entry_ptr = NULL;
+
+            aux_ptr->c_slist_len -= 1;
+
+            HDassert( aux_ptr->c_slist_len >= 0 );
+        }
+
+        /* if the entry appears in the dirtied entry slist under its old
+         * address, remove it, but don't free it.  Set addr to new_addr.
+         */
+        if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+                                            (void *)(&old_addr))) != NULL ) {
+
+            HDassert( slist_entry_ptr->magic ==
+                      H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC );
+            HDassert( slist_entry_ptr->addr == old_addr );
+
+            if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&old_addr))
+                 != slist_entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
+                            "Can't delete entry from dirty entry slist.")
+            }
+
+            /* the node is recycled: it is re-inserted under new_addr below */
+            slist_entry_ptr->addr = new_addr;
+
+            aux_ptr->d_slist_len -= 1;
+
+            HDassert( aux_ptr->d_slist_len >= 0 );
+
+        } else {
+
+            /* otherwise, allocate a new entry that is ready
+             * for insertion, and increment dirty_bytes.
+             *
+             * Note that the fact that the entry wasn't in the dirtied
+             * list under its old address implies that it must have
+             * been clean to start with.
+             */
+
+            HDassert( !entry_dirty );
+
+            if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC2_slist_entry_t)) ) {
+
+                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                            "Can't allocate dirty slist entry .")
+            }
+
+            slist_entry_ptr->magic = H5AC2__H5AC2_SLIST_ENTRY_T_MAGIC;
+            slist_entry_ptr->addr  = new_addr;
+
+            aux_ptr->dirty_bytes += entry_size;
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+            aux_ptr->rename_dirty_bytes += entry_size;
+            aux_ptr->rename_dirty_bytes_updates += 1;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+        }
+
+        /* verify that there is no entry at new_addr in the dirty slist */
+        if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&new_addr)) != NULL ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "dirty slist already contains entry at new_addr.")
+        }
+
+        /* insert / reinsert the entry in the dirty slist */
+        if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
+                         &(slist_entry_ptr->addr)) < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
+                        "can't insert entry into dirty entry slist.")
+        }
+
+        aux_ptr->d_slist_len += 1;
+
+    } else if ( ! entry_dirty ) {
+
+        /* non-zero ranks: a rename dirties a clean entry, so charge its
+         * size to the dirty byte count.
+         */
+        aux_ptr->dirty_bytes += entry_size;
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+        aux_ptr->rename_dirty_bytes += entry_size;
+        aux_ptr->rename_dirty_bytes_updates += 1;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_log_renamed_entry() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC2_propagate_flushed_and_still_clean_entries_list
+ *
+ * Purpose: In PHDF5, only the metadata cache with mpi rank 0 is allowed
+ * to write to file. All other metadata caches on processes
+ * with rank greater than 0 must retain dirty entries until
+ * they are notified that the entry is now clean.
+ *
+ * This function is the main routine for that procedure.
+ * It must be called simultaneously on all processes that
+ * have the relevant file open. To this end, there must
+ * be a barrier immediately prior to this call.
+ *
+ * Typically, this will be done one of two ways:
+ *
+ * 1) Dirty byte creation exceeds some user specified value.
+ *
+ * While metadata reads may occur independently, all
+ * operations writing metadata must be collective. Thus
+ * all metadata caches see the same sequence of operations,
+ * and therefore the same dirty data creation.
+ *
+ * This fact is used to synchronize the caches for purposes
+ * of propagating the list of flushed and still clean
+ * entries, by simply calling this function from all
+ * caches whenever some user specified threshold on dirty
+ * data is exceeded.
+ *
+ * 2) Under direct user control -- this operation must be
+ * collective.
+ *
+ * The operations to be managed by this function are as
+ * follows:
+ *
+ * For the process with mpi rank 0:
+ *
+ * 1) Enable writes, flush the cache to its min clean size,
+ * and then disable writes again.
+ *
+ * 2) Load the contents of the flushed and still clean entries
+ * list (c_slist_ptr) into a buffer, and broadcast that
+ * buffer to all the other caches.
+ *
+ * 3) Clear the flushed and still clean entries list
+ * (c_slist_ptr).
+ *
+ *
+ * For all processes with mpi rank greater than 0:
+ *
+ * 1) Receive the flushed and still clean entries list broadcast
+ *
+ * 2) Mark the specified entries as clean.
+ *
+ *
+ * For all processes:
+ *
+ * 1) Reset the dirtied bytes count to 0.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * July 5, 2005
+ *
+ * Modifications:
+ *
+ * JRM -- 5/11/06
+ * Added code to call the write_done callback.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC2_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
+                                                     hid_t dxpl_id,
+                                                     H5AC2_t * cache_ptr,
+                                                     hbool_t do_barrier)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    herr_t result;
+    int mpi_code;
+    H5AC2_aux_t * aux_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5AC2_propagate_flushed_and_still_clean_entries_list, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = (H5AC2_aux_t *)(cache_ptr->aux_ptr);
+
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+    HDfprintf(stdout,
+              "%d:H5AC2_propagate...:%d: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
+              (int)(aux_ptr->mpi_rank),
+              (int)(aux_ptr->dirty_bytes_propagations),
+              (int)(aux_ptr->unprotect_dirty_bytes),
+              (int)(aux_ptr->unprotect_dirty_bytes_updates),
+              (int)(aux_ptr->insert_dirty_bytes),
+              (int)(aux_ptr->insert_dirty_bytes_updates),
+              (int)(aux_ptr->rename_dirty_bytes),
+              (int)(aux_ptr->rename_dirty_bytes_updates));
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+
+    if ( do_barrier ) {
+
+        /* to prevent "messages from the future" we must synchronize all
+         * processes before we start the flush.  This synchronization may
+         * already be done -- hence the do_barrier parameter.
+         */
+
+        if ( MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)) ) {
+
+            HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
+        }
+    }
+
+    if ( aux_ptr->mpi_rank == 0 ) {
+
+        /* writes are enabled only for the duration of the flush, then
+         * disabled again so later reads cannot trigger writes.
+         */
+        aux_ptr->write_permitted = TRUE;
+
+        result = H5C2_flush_to_min_clean(cache_ptr, dxpl_id);
+
+        aux_ptr->write_permitted = FALSE;
+
+        if ( result < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "H5C2_flush_to_min_clean() failed.")
+        }
+
+        /* optional hook used by test code to observe write completion */
+        if ( aux_ptr->write_done != NULL ) {
+
+            (aux_ptr->write_done)();
+        }
+
+        if ( H5AC2_broadcast_clean_list(cache_ptr) < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Can't broadcast clean slist.")
+        }
+
+        /* the broadcast empties the clean list in passing */
+        HDassert( aux_ptr->c_slist_len == 0 );
+
+    } else {
+
+        if ( H5AC2_receive_and_apply_clean_list(f, dxpl_id,
+                                                cache_ptr) < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Can't receive and/or process clean slist broadcast.")
+        }
+    }
+
+    /* all ranks reset the dirty byte count after a propagation */
+    aux_ptr->dirty_bytes = 0;
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+    aux_ptr->dirty_bytes_propagations += 1;
+    aux_ptr->unprotect_dirty_bytes = 0;
+    aux_ptr->unprotect_dirty_bytes_updates = 0;
+    aux_ptr->insert_dirty_bytes = 0;
+    aux_ptr->insert_dirty_bytes_updates = 0;
+    aux_ptr->rename_dirty_bytes = 0;
+    aux_ptr->rename_dirty_bytes_updates = 0;
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_propagate_flushed_and_still_clean_entries_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC2_receive_and_apply_clean_list()
+ *
+ * Purpose: Receive the list of cleaned entries from process 0,
+ * and mark the specified entries as clean.
+ *
+ * This function must only be called by the process with
+ * MPI_rank greater than 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/4/05
+ *
+ * Modifications:
+ *
+ * JRM --10/24/07
+ * Reworked parameter list in support of the revised cache
+ * API.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC2_receive_and_apply_clean_list(H5F_t * f,
+                                   hid_t dxpl_id,
+                                   H5AC2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    H5AC2_aux_t * aux_ptr = NULL;
+    haddr_t * haddr_buf_ptr = NULL;
+    MPI_Offset * MPI_Offset_buf_ptr = NULL;
+    size_t buf_size;
+    int i = 0;
+    int mpi_result;
+    int num_entries;
+
+    FUNC_ENTER_NOAPI(H5AC2_receive_and_apply_clean_list, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    aux_ptr = (H5AC2_aux_t *)(cache_ptr->aux_ptr);
+
+    /* receiver side of H5AC2_broadcast_clean_list() -- never rank 0 */
+    HDassert( aux_ptr != NULL );
+    HDassert( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC );
+    HDassert( aux_ptr->mpi_rank != 0 );
+
+    /* First receive the number of entries in the list so that we
+     * can set up a buffer to receive them.  If there aren't
+     * any, we are done.
+     */
+    mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm);
+
+    if ( mpi_result != MPI_SUCCESS ) {
+
+        HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+    }
+
+    if ( num_entries > 0 )
+    {
+        /* allocate a buffers to store the list of entry base addresses in */
+
+        buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
+
+        MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size);
+
+        if ( MPI_Offset_buf_ptr == NULL ) {
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                        "memory allocation failed for receive buffer")
+        }
+
+        haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
+                                               (size_t)num_entries);
+
+        if ( haddr_buf_ptr == NULL ) {
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                        "memory allocation failed for haddr buffer")
+        }
+
+
+        /* Now receive the list of cleaned entries
+         *
+         * The peculiar structure of the following call to MPI_Bcast is
+         * due to MPI's (?) failure to believe in the MPI_Offset type.
+         * Thus the element type is MPI_BYTE, with size equal to the
+         * buf_size computed above.
+         */
+
+        mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
+                               MPI_BYTE, 0, aux_ptr->mpi_comm);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+        }
+
+
+        /* translate the MPI_Offsets to haddr_t */
+        i = 0;
+        while ( i < num_entries )
+        {
+            haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
+
+            if ( haddr_buf_ptr[i] == HADDR_UNDEF ) {
+
+                HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
+                            "can't convert MPI off to haddr")
+            }
+
+            i++;
+        }
+
+
+        /* mark the indicated entries as clean */
+        if ( H5C2_mark_entries_as_clean(cache_ptr, dxpl_id,
+                                        (int32_t)num_entries,
+                                        &(haddr_buf_ptr[0])) < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Can't mark entries clean.")
+
+        }
+    }
+
+done:
+
+    /* release both buffers on all paths, including errors */
+    if ( MPI_Offset_buf_ptr != NULL ) {
+
+        MPI_Offset_buf_ptr =
+            (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
+    }
+
+    if ( haddr_buf_ptr != NULL ) {
+
+        haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC2_receive_and_apply_clean_list() */
+#endif /* H5_HAVE_PARALLEL */
+
diff --git a/src/H5AC2pkg.h b/src/H5AC2pkg.h
new file mode 100644
index 0000000..202f4e3
--- /dev/null
+++ b/src/H5AC2pkg.h
@@ -0,0 +1,335 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: John Mainzer -- 4/19/06
+ *
+ * Purpose: This file contains declarations which are normally visible
+ * only within the H5AC2 package (just H5AC2.c at present).
+ *
+ * Source files outside the H5AC package should include
+ * H5ACprivate.h instead.
+ *
+ * The one exception to this rule is testpar/t_cache.c. The
+ * test code is easier to write if it can look at H5AC_aux_t.
+ * Indeed, this is the main reason why this file was created.
+ *
+ * Modifications:
+ *
+ * JRM - 10/18/07
+ * Copied H5ACpkg.h to H5AC2pkg.h and reworked
+ * to use H5C2 instead of H5C. All this is in support
+ * of cache API modifications needed for journaling.
+ *
+ */
+
+#ifndef H5AC2_PACKAGE
+#error "Do not include this file outside the H5AC2 package!"
+#endif
+
+#ifndef _H5AC2pkg_H
+#define _H5AC2pkg_H
+
+#define H5C2_PACKAGE /*suppress error about including H5C2pkg */
+
+/* Get package's private header */
+#include "H5AC2private.h"
+#include "H5C2private.h"
+
+
+/* Get needed headers */
+#include "H5C2pkg.h" /* Cache */
+#include "H5SLprivate.h" /* Skip lists */
+
+
+#define H5AC2_DEBUG_DIRTY_BYTES_CREATION 0
+
+/*-------------------------------------------------------------------------
+ * It is a bit difficult to set ranges of allowable values on the
+ * dirty_bytes_threshold field of H5AC2_aux_t. The following are
+ * probably broader than they should be.
+ *-------------------------------------------------------------------------
+ */
+
+#define H5AC2__MIN_DIRTY_BYTES_THRESHOLD (int32_t) \
+ (H5C2__MIN_MAX_CACHE_SIZE / 2)
+#define H5AC2__DEFAULT_DIRTY_BYTES_THRESHOLD (256 * 1024)
+#define H5AC2__MAX_DIRTY_BYTES_THRESHOLD (int32_t) \
+ (H5C2__MAX_MAX_CACHE_SIZE / 4)
+
+/****************************************************************************
+ *
+ * structure H5AC2_aux_t
+ *
+ * While H5AC2 has become a wrapper for the cache implemented in H5C2.c, there
+ * are some features of the metadata cache that are specific to it, and which
+ * therefore do not belong in the more generic H5C2 cache code.
+ *
+ * In particular, there is the matter of synchronizing writes from the
+ * metadata cache to disk in the PHDF5 case.
+ *
+ * Prior to this update, the presumption was that all metadata caches would
+ * write the same data at the same time since all operations modifying
+ * metadata must be performed collectively. Given this assumption, it was
+ * safe to allow only the writes from process 0 to actually make it to disk,
+ * while metadata writes from all other processes were discarded.
+ *
+ * Unfortunately, this presumption is in error as operations that read
+ * metadata need not be collective, but can change the location of dirty
+ * entries in the metadata cache LRU lists. This can result in the same
+ * metadata write operation triggering writes from the metadata caches on
+ * some processes, but not all (causing a hang), or in different sets of
+ * entries being written from different caches (potentially resulting in
+ * metadata corruption in the file).
+ *
+ * To deal with this issue, I decided to apply a paradigm shift to the way
+ * metadata is written to disk.
+ *
+ * With this set of changes, only the metadata cache on process 0 is able
+ * to write metadata to disk, although metadata caches on all other
+ * processes can read metadata from disk as before.
+ *
+ * To keep all the other caches from getting plugged up with dirty metadata,
+ * process 0 periodically broadcasts a list of entries that it has flushed
+ * since that last notice, and which are currently clean. The other caches
+ * mark these entries as clean as well, which allows them to evict the
+ * entries as needed.
+ *
+ * One obvious problem in this approach is synchronizing the broadcasts
+ * and receptions, as different caches may see different amounts of
+ * activity.
+ *
+ * The current solution is for the caches to track the number of bytes
+ * of newly generated dirty metadata, and to broadcast and receive
+ * whenever this value exceeds some user specified threshold.
+ *
+ * Maintaining this count is easy for all processes not on process 0 --
+ * all that is necessary is to add the size of the entry to the total
+ * whenever there is an insertion, a rename of a previously clean entry,
+ * or whenever a previously clean entry is marked dirty in an unprotect.
+ *
+ * On process 0, we have to be careful not to count dirty bytes twice.
+ * If an entry is marked dirty, flushed, and marked dirty again, all
+ * within a single reporting period, only the first marking should
+ * be added to the dirty bytes generated tally, as that is all that
+ * the other processes will see.
+ *
+ * At present, this structure exists to maintain the fields needed to
+ * implement the above scheme, and thus is only used in the parallel
+ * case. However, other uses may arise in the future.
+ *
+ * Instance of this structure are associated with metadata caches via
+ * the aux_ptr field of H5C2_t (see H5C2pkg.h). The H5AC2 code is
+ * responsible for allocating, maintaining, and discarding instances
+ * of H5AC2_aux_t.
+ *
+ * The remainder of this header comments documents the individual fields
+ * of the structure.
+ *
+ * JRM - 6/27/05
+ *
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC2__H5AC2_AUX_T_MAGIC. This field is used to validate
+ * pointers to instances of H5AC2_aux_t.
+ *
+ * mpi_comm: MPI communicator associated with the file for which the
+ * cache has been created.
+ *
+ * mpi_rank: MPI rank of this process within mpi_comm.
+ *
+ * mpi_size: Number of processes in mpi_comm.
+ *
+ * write_permitted: Boolean flag used to control whether the cache
+ * is permitted to write to file.
+ *
+ * dirty_bytes_threshold: Integer field containing the dirty bytes
+ * generation threshold. Whenever dirty byte creation
+ * exceeds this value, the metadata cache on process 0
+ * broadcasts a list of the entries it has flushed since
+ * the last broadcast (or since the beginning of execution)
+ * and which are currently clean (if they are still in the
+ * cache)
+ *
+ * Similarly, metadata caches on processes other than process
+ * 0 will attempt to receive a list of clean entries whenever
+ * the threshold is exceeded.
+ *
+ * dirty_bytes: Integer field containing the number of bytes of dirty
+ * metadata generated since the beginning of the computation,
+ * or (more typically) since the last clean entries list
+ * broadcast. This field is reset to zero after each such
+ * broadcast.
+ *
+ * dirty_bytes_propagations: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of times the cleaned list
+ * has been propagated from process 0 to the other
+ * processes.
+ *
+ * unprotect_dirty_bytes: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of dirty bytes created
+ * via unprotect operations since the last time the cleaned
+ * list was propagated.
+ *
+ * unprotect_dirty_bytes_updates: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of times dirty bytes have
+ * been created via unprotect operations since the last time
+ * the cleaned list was propagated.
+ *
+ * insert_dirty_bytes: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of dirty bytes created
+ * via insert operations since the last time the cleaned
+ * list was propagated.
+ *
+ * insert_dirty_bytes_updates: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of times dirty bytes have
+ * been created via insert operations since the last time
+ * the cleaned list was propagated.
+ *
+ * rename_dirty_bytes: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of dirty bytes created
+ * via rename operations since the last time the cleaned
+ * list was propagated.
+ *
+ * rename_dirty_bytes_updates: This field only exists when the
+ * H5AC2_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
+ *
+ * It is used to track the number of times dirty bytes have
+ * been created via rename operations since the last time
+ * the cleaned list was propagated.
+ *
+ * d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
+ * of entries that have been dirtied since the last time they
+ * were listed in a clean entries broadcast. This list is
+ * only maintained by the metadata cache on process 0 -- it
+ * is used to maintain a view of the dirty entries as seen
+ * by the other caches, so as to keep the dirty bytes count
+ * in synchronization with them.
+ *
+ * Thus on process 0, the dirty_bytes count is incremented
+ * only if either
+ *
+ * 1) an entry is inserted in the metadata cache, or
+ *
+ * 2) a previously clean entry is renamed, and it does not
+ * already appear in the dirty entry list, or
+ *
+ * 3) a previously clean entry is unprotected with the
+ * dirtied flag set and the entry does not already appear
+ * in the dirty entry list.
+ *
+ * Entries are added to the dirty entry list whenever they cause
+ * the dirty bytes count to be increased. They are removed
+ * when they appear in a clean entries broadcast. Note that
+ * renames must be reflected in the dirty entry list.
+ *
+ * To reiterate, this field is only used on process 0 -- it
+ * should be NULL on all other processes.
+ *
+ * d_slist_len: Integer field containing the number of entries in the
+ * dirty entry list. This field should always contain the
+ * value 0 on all processes other than process 0. It exists
+ * primarily for sanity checking.
+ *
+ * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
+ * of entries that were dirty, have been flushed
+ * to disk since the last clean entries broadcast, and are
+ * still clean. Since only process 0 can write to disk, this
+ * list only exists on process 0.
+ *
+ * In essence, this slist is used to assemble the contents of
+ * the next clean entries broadcast. The list is emptied after
+ * each broadcast.
+ *
+ * c_slist_len: Integer field containing the number of entries in the clean
+ * entries list (*c_slist_ptr). This field should always
+ * contain the value 0 on all processes other than process 0.
+ * It exists primarily for sanity checking.
+ *
+ * write_done: In the parallel test bed, it is necessary to ensure that
+ * all writes to the server process from cache 0 complete
+ * before it enters the barrier call with the other caches.
+ *
+ * The write_done callback allows t_cache to do this without
+ * requiring an ACK on each write. Since these ACKs greatly
+ * increase the run time on some platforms, this is a
+ * significant optimization.
+ *
+ * This field must be set to NULL when the callback is not
+ * needed.
+ *
+ ****************************************************************************/
+
+#ifdef H5_HAVE_PARALLEL
+
+#define H5AC2__H5AC2_AUX_T_MAGIC (unsigned)0x00D0A02
+
+typedef struct H5AC2_aux_t
+{
+ uint32_t magic;
+
+ MPI_Comm mpi_comm;
+
+ int mpi_rank;
+
+ int mpi_size;
+
+ hbool_t write_permitted;
+
+ int32_t dirty_bytes_threshold;
+
+ int32_t dirty_bytes;
+
+#if H5AC2_DEBUG_DIRTY_BYTES_CREATION
+
+ int32_t dirty_bytes_propagations;
+
+ int32_t unprotect_dirty_bytes;
+ int32_t unprotect_dirty_bytes_updates;
+
+ int32_t insert_dirty_bytes;
+ int32_t insert_dirty_bytes_updates;
+
+ int32_t rename_dirty_bytes;
+ int32_t rename_dirty_bytes_updates;
+
+#endif /* H5AC2_DEBUG_DIRTY_BYTES_CREATION */
+
+ H5SL_t * d_slist_ptr;
+
+ int32_t d_slist_len;
+
+ H5SL_t * c_slist_ptr;
+
+ int32_t c_slist_len;
+
+ void (* write_done)(void);
+
+} H5AC2_aux_t; /* struct H5AC2_aux_t */
+
+#endif /* H5_HAVE_PARALLEL */
+
+#endif /* _H5AC2pkg_H */
diff --git a/src/H5AC2private.h b/src/H5AC2private.h
new file mode 100644
index 0000000..de76b40
--- /dev/null
+++ b/src/H5AC2private.h
@@ -0,0 +1,315 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5AC2private.h
+ * Jul 9 1997
+ * Robb Matzke <matzke@llnl.gov>
+ *
+ * Purpose: Constants and typedefs available to the rest of the
+ * library.
+ *
+ * Modifications: JRM - 6/4/04
+ * Complete re-write for a new caching algorithm
+ * located in H5C.c
+ *
+ * JRM - 10/18/07
+ * Copied H5ACprivate.h to H5AC2private.h and reworked
+ * to use H5C2 instead of H5C. All this is in support
+ * of cache API modifications needed for journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef _H5AC2private_H
+#define _H5AC2private_H
+
+#include "H5AC2public.h" /*public prototypes */
+
+/* Private headers needed by this header */
+#include "H5private.h" /* Generic Functions */
+#include "H5Fprivate.h" /* File access */
+#include "H5C2private.h" /* cache */
+
+#ifdef H5_METADATA_TRACE_FILE
+#define H5AC2__TRACE_FILE_ENABLED 1
+#else /* H5_METADATA_TRACE_FILE */
+#define H5AC2__TRACE_FILE_ENABLED 0
+#endif /* H5_METADATA_TRACE_FILE */
+
+/* Types of metadata objects cached */
+typedef enum {
+ H5AC2_BT_ID = 0, /*B-tree nodes */
+ H5AC2_SNODE_ID, /*symbol table nodes */
+ H5AC2_LHEAP_ID, /*local heap */
+ H5AC2_GHEAP_ID, /*global heap */
+ H5AC2_OHDR_ID, /*object header */
+ H5AC2_BT2_HDR_ID, /*v2 B-tree header */
+ H5AC2_BT2_INT_ID, /*v2 B-tree internal node */
+ H5AC2_BT2_LEAF_ID, /*v2 B-tree leaf node */
+ H5AC2_FHEAP_HDR_ID, /*fractal heap header */
+ H5AC2_FHEAP_DBLOCK_ID,/*fractal heap direct block */
+ H5AC2_FHEAP_IBLOCK_ID,/*fractal heap indirect block */
+ H5AC2_FSPACE_HDR_ID, /*free space header */
+ H5AC2_FSPACE_SINFO_ID,/*free space sections */
+ H5AC2_SOHM_TABLE_ID, /*shared object header message master table */
+ H5AC2_SOHM_LIST_ID, /*shared message index stored as a list */
+ H5AC2_TEST_ID, /*test entry -- not used for actual files */
+ H5AC2_NTYPES /* Number of types, must be last */
+} H5AC2_type_t;
+
+/* H5AC2_DUMP_STATS_ON_CLOSE should always be FALSE when
+ * H5C2_COLLECT_CACHE_STATS is FALSE.
+ *
+ * When H5C2_COLLECT_CACHE_STATS is TRUE, H5AC2_DUMP_STATS_ON_CLOSE must
+ * be FALSE for "make check" to succeed, but may be set to TRUE at other
+ * times for debugging purposes.
+ *
+ * Hence the following, somewhat odd set of #defines.
+ */
+#if H5C2_COLLECT_CACHE_STATS
+
+#define H5AC2_DUMP_STATS_ON_CLOSE 0
+
+#else /* H5C2_COLLECT_CACHE_STATS */
+
+#define H5AC2_DUMP_STATS_ON_CLOSE 0
+
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+/* Default max metadata cache size and min clean size are give here.
+ * At present, these are the same as those given in H5C2private.h.
+ */
+
+#define H5AC2__DEFAULT_MAX_CACHE_SIZE H5C2__DEFAULT_MAX_CACHE_SIZE
+#define H5AC2__DEFAULT_MIN_CLEAN_SIZE H5C2__DEFAULT_MIN_CLEAN_SIZE
+
+
+/*
+ * Class methods pertaining to caching. Each type of cached object will
+ * have a constant variable with permanent life-span that describes how
+ * to cache the object. That variable will be of type H5AC2_class_t and
+ * have the following required fields...
+ *
+ * LOAD: Loads an object from disk to memory. The function
+ * should allocate some data structure and return it.
+ *
+ * FLUSH: Writes some data structure back to disk. It would be
+ * wise for the data structure to include dirty flags to
+ * indicate whether it really needs to be written. This
+ * function is also responsible for freeing memory allocated
+ * by the LOAD method if the DEST argument is non-zero (by
+ * calling the DEST method).
+ *
+ * DEST: Just frees memory allocated by the LOAD method.
+ *
+ * CLEAR: Just marks object as non-dirty.
+ *
+ * SIZE: Report the size (on disk) of the specified cache object.
+ * Note that the space allocated on disk may not be contiguous.
+ */
+
+#define H5AC2__SERIALIZE_RESIZED_FLAG H5C2__SERIALIZE_RESIZED_FLAG
+#define H5AC2__SERIALIZE_RENAMED_FLAG H5C2__SERIALIZE_RENAMED_FLAG
+
+typedef H5C2_deserialize_func_t H5AC2_deserialize_func_t;
+typedef H5C2_image_len_func_t H5AC2_image_len_func_t;
+typedef H5C2_serialize_func_t H5AC2_serialize_func_t;
+typedef H5C2_free_icr_func_t H5CA2_free_icr_func_t;
+typedef H5C2_clear_dirty_bits_func_t H5AC2_clear_dirty_bits_func_t;
+
+typedef H5C2_class_t H5AC2_class_t;
+
+
+
+typedef H5C2_cache_entry_t H5AC2_info_t;
+
+
+/*===----------------------------------------------------------------------===
+ * Protect Types
+ *===----------------------------------------------------------------------===
+ *
+ * These are for the wrapper functions to H5AC2_protect. They specify what
+ * type of operation you're planning on doing to the metadata. The
+ * Flexible Parallel HDF5 locking can then act accordingly.
+ */
+
+typedef enum H5AC2_protect_t {
+ H5AC2_WRITE, /* Protect object for writing */
+ H5AC2_READ /* Protect object for reading */
+} H5AC2_protect_t;
+
+
+/* Typedef for metadata cache (defined in H5C2pkg.h) */
+typedef H5C2_t H5AC2_t;
+
+/* Metadata specific properties for FAPL */
+/* (Only used for parallel I/O) */
+#ifdef H5_HAVE_PARALLEL
+/* Definitions for "block before metadata write" property */
+#define H5AC2_BLOCK_BEFORE_META_WRITE_NAME "H5AC2_block_before_meta_write"
+#define H5AC2_BLOCK_BEFORE_META_WRITE_SIZE sizeof(unsigned)
+#define H5AC2_BLOCK_BEFORE_META_WRITE_DEF 0
+
+/* Definitions for "library internal" property */
+#define H5AC2_LIBRARY_INTERNAL_NAME "H5AC2_library_internal"
+#define H5AC2_LIBRARY_INTERNAL_SIZE sizeof(unsigned)
+#define H5AC2_LIBRARY_INTERNAL_DEF 0
+#endif /* H5_HAVE_PARALLEL */
+
+/* Dataset transfer property list for flush calls */
+/* (Collective set, "block before metadata write" set and "library internal" set) */
+/* (Global variable declaration, definition is in H5AC2.c) */
+extern hid_t H5AC2_dxpl_id;
+
+/* Dataset transfer property list for independent metadata I/O calls */
+/* (just "library internal" set - i.e. independent transfer mode) */
+/* (Global variable declaration, definition is in H5AC2.c) */
+extern hid_t H5AC2_ind_dxpl_id;
+
+
+/* Default cache configuration. */
+
+#define H5AC2__DEFAULT_CACHE_CONFIG \
+{ \
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER, \
+ /* hbool_t rpt_fcn_enabled = */ FALSE, \
+ /* hbool_t open_trace_file = */ FALSE, \
+ /* hbool_t close_trace_file = */ FALSE, \
+ /* char trace_file_name[] = */ "", \
+ /* hbool_t evictions_enabled = */ TRUE, \
+ /* hbool_t set_initial_size = */ TRUE, \
+ /* size_t initial_size = */ ( 1 * 1024 * 1024 ), \
+ /* double min_clean_fraction = */ 0.5, \
+ /* size_t max_size = */ (16 * 1024 * 1024 ), \
+ /* size_t min_size = */ ( 1 * 1024 * 1024 ), \
+ /* long int epoch_length = */ 50000, \
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold, \
+ /* double lower_hr_threshold = */ 0.9, \
+ /* double increment = */ 2.0, \
+ /* hbool_t apply_max_increment = */ TRUE, \
+ /* size_t max_increment = */ (4 * 1024 * 1024), \
+ /* enum H5C2_cache_decr_mode decr_mode = */ \
+ H5C2_decr__age_out_with_threshold, \
+ /* double upper_hr_threshold = */ 0.999, \
+ /* double decrement = */ 0.9, \
+ /* hbool_t apply_max_decrement = */ TRUE, \
+ /* size_t max_decrement = */ (1 * 1024 * 1024), \
+ /* int epochs_before_eviction = */ 3, \
+ /* hbool_t apply_empty_reserve = */ TRUE, \
+ /* double empty_reserve = */ 0.1, \
+ /* int dirty_bytes_threshold = */ (256 * 1024) \
+}
+
+
+/*
+ * Library prototypes.
+ */
+
+/* #defines of flags used in the flags parameters in some of the
+ * following function calls. Note that they are just copies of
+ * the equivalent flags from H5C2private.h.
+ */
+
+#define H5AC2__NO_FLAGS_SET H5C2__NO_FLAGS_SET
+#define H5AC2__SET_FLUSH_MARKER_FLAG H5C2__SET_FLUSH_MARKER_FLAG
+#define H5AC2__DELETED_FLAG H5C2__DELETED_FLAG
+#define H5AC2__DIRTIED_FLAG H5C2__DIRTIED_FLAG
+#define H5AC2__SIZE_CHANGED_FLAG H5C2__SIZE_CHANGED_FLAG
+#define H5AC2__PIN_ENTRY_FLAG H5C2__PIN_ENTRY_FLAG
+#define H5AC2__UNPIN_ENTRY_FLAG H5C2__UNPIN_ENTRY_FLAG
+#define H5AC2__FLUSH_INVALIDATE_FLAG H5C2__FLUSH_INVALIDATE_FLAG
+#define H5AC2__FLUSH_CLEAR_ONLY_FLAG H5C2__FLUSH_CLEAR_ONLY_FLAG
+#define H5AC2__FLUSH_MARKED_ENTRIES_FLAG H5C2__FLUSH_MARKED_ENTRIES_FLAG
+#define H5AC2__FLUSH_IGNORE_PROTECTED_FLAG H5C2__FLUSH_IGNORE_PROTECTED_FLAG
+
+
+/* #defines of flags used to report entry status in the
+ * H5AC2_get_entry_status() call.
+ */
+
+#define H5AC2_ES__IN_CACHE 0x0001
+#define H5AC2_ES__IS_DIRTY 0x0002
+#define H5AC2_ES__IS_PROTECTED 0x0004
+#define H5AC2_ES__IS_PINNED 0x0008
+
+
+/* external function declarations: */
+
+H5_DLL herr_t H5AC2_init(void);
+H5_DLL herr_t H5AC2_create(const H5F_t *f, H5AC2_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC2_get_entry_status(H5F_t * f, haddr_t addr,
+ unsigned * status_ptr);
+H5_DLL herr_t H5AC2_set(H5F_t *f, hid_t dxpl_id, const H5AC2_class_t *type,
+ haddr_t addr, size_t len, void *thing,
+ unsigned int flags);
+H5_DLL herr_t H5AC2_pin_protected_entry(H5F_t * f, void * thing);
+H5_DLL void * H5AC2_protect(H5F_t *f, hid_t dxpl_id, const H5AC2_class_t *type,
+ haddr_t addr, size_t len, const void *udata,
+ H5AC2_protect_t rw);
+H5_DLL herr_t H5AC2_resize_pinned_entry(H5F_t * f,
+ void * thing,
+ size_t new_size);
+H5_DLL herr_t H5AC2_unpin_entry(H5F_t * f,
+ void * thing);
+H5_DLL herr_t H5AC2_unprotect(H5F_t *f, hid_t dxpl_id,
+ const H5AC2_class_t *type, haddr_t addr,
+ size_t new_size, void *thing, unsigned flags);
+H5_DLL herr_t H5AC2_flush(H5F_t *f, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5AC2_mark_pinned_entry_dirty(H5F_t * f,
+ void * thing,
+ hbool_t size_changed,
+ size_t new_size);
+H5_DLL herr_t H5AC2_mark_pinned_or_protected_entry_dirty(H5F_t * f,
+ void * thing);
+H5_DLL herr_t H5AC2_rename(H5F_t *f, const H5AC2_class_t *type,
+ haddr_t old_addr, haddr_t new_addr);
+
+H5_DLL herr_t H5AC2_dest(H5F_t *f, hid_t dxpl_id);
+
+H5_DLL herr_t H5AC2_expunge_entry(H5F_t *f, hid_t dxpl_id,
+ const H5AC2_class_t *type, haddr_t addr);
+
+H5_DLL herr_t H5AC2_set_write_done_callback(H5C2_t * cache_ptr,
+ void (* write_done)(void));
+H5_DLL herr_t H5AC2_stats(const H5F_t *f);
+
+H5_DLL herr_t H5AC2_get_cache_auto_resize_config(H5AC2_t * cache_ptr,
+ H5AC2_cache_config_t *config_ptr);
+
+H5_DLL herr_t H5AC2_get_cache_size(H5AC2_t * cache_ptr,
+ size_t * max_size_ptr,
+ size_t * min_clean_size_ptr,
+ size_t * cur_size_ptr,
+ int32_t * cur_num_entries_ptr);
+
+H5_DLL herr_t H5AC2_get_cache_hit_rate(H5AC2_t * cache_ptr,
+ double * hit_rate_ptr);
+
+H5_DLL herr_t H5AC2_reset_cache_hit_rate_stats(H5AC2_t * cache_ptr);
+
+H5_DLL herr_t H5AC2_set_cache_auto_resize_config(H5AC2_t * cache_ptr,
+ H5AC2_cache_config_t *config_ptr);
+
+H5_DLL herr_t H5AC2_validate_config(H5AC2_cache_config_t * config_ptr);
+
+H5_DLL herr_t H5AC2_close_trace_file(H5AC2_t * cache_ptr);
+
+H5_DLL herr_t H5AC2_open_trace_file(H5AC2_t * cache_ptr,
+ const char * trace_file_name);
+
+#endif /* !_H5AC2private_H */
+
diff --git a/src/H5AC2public.h b/src/H5AC2public.h
new file mode 100644
index 0000000..3c88ef1
--- /dev/null
+++ b/src/H5AC2public.h
@@ -0,0 +1,393 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5ACpublic.h
+ * Jul 10 1997
+ * Robb Matzke <matzke@llnl.gov>
+ *
+ * Purpose: Public include file for cache functions.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef _H5AC2public_H
+#define _H5AC2public_H
+
+/* Public headers needed by this file */
+#include "H5public.h"
+#include "H5C2public.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define H5AC2__MAX_TRACE_FILE_NAME_LEN 1024
+
+/****************************************************************************
+ *
+ * structure H5AC2_cache_config_t
+ *
+ * H5AC2_cache_config_t is a public structure intended for use in public APIs.
+ * At least in its initial incarnation, it is basically a copy of struct
+ * H5C2_auto_size_ctl_t, minus the report_fcn field, and plus the
+ * dirty_bytes_threshold field.
+ *
+ * The report_fcn field is omitted, as including it would require us to
+ * make H5C2_t structure public.
+ *
+ * The dirty_bytes_threshold field does not appear in H5C2_auto_size_ctl_t,
+ * as synchronization between caches on different processes is handled at
+ * the H5AC2 level, not at the level of H5C2. Note however that there is
+ * considerable interaction between this value and the other fields in this
+ * structure.
+ *
+ * Similarly, the open_trace_file, close_trace_file, and trace_file_name
+ * fields do not appear in H5C2_auto_size_ctl_t, as most trace file
+ * issues are handled at the H5AC2 level. The one exception is storage of
+ * the pointer to the trace file, which is handled by H5C2.
+ *
+ * The structure is in H5AC2public.h as we may wish to allow different
+ * configuration options for metadata and raw data caches.
+ *
+ * The fields of the structure are discussed individually below:
+ *
+ * version: Integer field containing the version number of this version
+ * of the H5AC2_cache_config_t structure. Any instance of
+ * H5AC2_cache_config_t passed to the cache must have a known
+ * version number, or an error will be flagged.
+ *
+ * rpt_fcn_enabled: Boolean field used to enable and disable the default
+ * reporting function. This function is invoked every time the
+ * automatic cache resize code is run, and reports on its activities.
+ *
+ * This is a debugging function, and should normally be turned off.
+ *
+ * open_trace_file: Boolean field indicating whether the trace_file_name
+ * field should be used to open a trace file for the cache.
+ *
+ * The trace file is a debugging feature that allows the capture of
+ * top level metadata cache requests for purposes of debugging and/or
+ * optimization. This field should normally be set to FALSE, as
+ * trace file collection imposes considerable overhead.
+ *
+ * This field should only be set to TRUE when the trace_file_name
+ * contains the full path of the desired trace file, and either
+ * there is no open trace file on the cache, or the close_trace_file
+ * field is also TRUE.
+ *
+ * close_trace_file: Boolean field indicating whether the current trace
+ * file (if any) should be closed.
+ *
+ * See the above comments on the open_trace_file field. This field
+ * should be set to FALSE unless there is an open trace file on the
+ * cache that you wish to close.
+ *
+ * trace_file_name: Full path of the trace file to be opened if the
+ * open_trace_file field is TRUE.
+ *
+ * In the parallel case, an ascii representation of the mpi rank of
+ * the process will be appended to the file name to yield a unique
+ * trace file name for each process.
+ *
+ * The length of the path must not exceed H5AC2__MAX_TRACE_FILE_NAME_LEN
+ * characters.
+ *
+ * evictions_enabled: Boolean field used to either report the current
+ * evictions enabled status of the cache, or to set the cache's
+ * evictions enabled status.
+ *
+ * In general, the metadata cache should always be allowed to
+ * evict entries. However, in some cases it is advantageous to
+ * disable evictions briefly, and thereby postpone metadata
+ * writes. However, this must be done with care, as the cache
+ * can grow quickly. If you do this, re-enable evictions as
+ * soon as possible and monitor cache size.
+ *
+ * At present, evictions can only be disabled if automatic
+ * cache resizing is also disabled (that is, ( incr_mode ==
+ * H5C2_incr__off ) && ( decr_mode == H5C2_decr__off )). There
+ * is no logical reason why this should be so, but it simplifies
+ * implementation and testing, and I can't think of any reason
+ * why it would be desirable. If you can think of one, I'll
+ * revisit the issue.
+ *
+ * set_initial_size: Boolean flag indicating whether the
+ * initial size of the cache is to be set to the value given in
+ * the initial_size field. If set_initial_size is FALSE, the
+ * initial_size field is ignored.
+ *
+ * initial_size: If enabled, this field contains the size the cache is
+ * to be set to upon receipt of this structure. Needless to say,
+ * initial_size must lie in the closed interval [min_size, max_size].
+ *
+ * min_clean_fraction: double in the range 0 to 1 indicating the fraction
+ * of the cache that is to be kept clean. This field is only used
+ * in parallel mode. Typical values are 0.1 to 0.5.
+ *
+ * max_size: Maximum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must
+ * be greater than or equal to min_size.
+ *
+ * min_size: Minimum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [H5C2__MIN_MAX_CACHE_SIZE, H5C2__MAX_MAX_CACHE_SIZE]. Also, min_size
+ * must be less than or equal to max_size.
+ *
+ * epoch_length: Number of accesses on the cache over which to collect
+ * hit rate stats before running the automatic cache resize code,
+ * if it is enabled.
+ *
+ * At the end of an epoch, we discard prior hit rate data and start
+ * collecting afresh. The epoch_length must lie in the closed
+ * interval [H5C2__MIN_AR_EPOCH_LENGTH, H5C2__MAX_AR_EPOCH_LENGTH].
+ *
+ *
+ * Cache size increase control fields:
+ *
+ * incr_mode: Instance of the H5C2_cache_incr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * increased. At present there are two possible values:
+ *
+ * H5C2_incr__off: Don't attempt to increase the size of the cache
+ * automatically.
+ *
+ * When this increment mode is selected, the remaining fields
+ * in the cache size increase section are ignored.
+ *
+ * H5C2_incr__threshold: Attempt to increase the size of the cache
+ * whenever the average hit rate over the last epoch drops
+ * below the value supplied in the lower_hr_threshold
+ * field.
+ *
+ * Note that this attempt will fail if the cache is already
+ * at its maximum size, or if the cache is not already using
+ * all available space.
+ *
+ * Note that you must set incr_mode to H5C2_incr__off if you
+ * disable metadata cache entry evictions.
+ *
+ * lower_hr_threshold: Lower hit rate threshold. If the increment mode
+ * (incr_mode) is H5C2_incr__threshold and the hit rate drops below the
+ * value supplied in this field in an epoch, increment the cache size by
+ * size_increment. Note that cache size may not be incremented above
+ * max_size, and that the increment may be further restricted by the
+ * max_increment field if it is enabled.
+ *
+ * When enabled, this field must contain a value in the range [0.0, 1.0].
+ * Depending on the incr_mode selected, it may also have to be less than
+ * upper_hr_threshold.
+ *
+ * increment: Double containing the multiplier used to derive the new
+ * cache size from the old if a cache size increment is triggered.
+ * The increment must be greater than 1.0, and should not exceed 2.0.
+ *
+ * The new cache size is obtained by multiplying the current max cache
+ * size by the increment, and then clamping to max_size and to stay
+ * within the max_increment as necessary.
+ *
+ * apply_max_increment: Boolean flag indicating whether the max_increment
+ * field should be used to limit the maximum cache size increment.
+ *
+ * max_increment: If enabled by the apply_max_increment field described
+ * above, this field contains the maximum number of bytes by which the
+ * cache size can be increased in a single re-size.
+ *
+ *
+ * Cache size decrease control fields:
+ *
+ * decr_mode: Instance of the H5C2_cache_decr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * decreased. At present there are four possibilities.
+ *
+ * H5C2_decr__off: Don't attempt to decrease the size of the cache
+ * automatically.
+ *
+ * When this increment mode is selected, the remaining fields
+ * in the cache size decrease section are ignored.
+ *
+ * H5C2_decr__threshold: Attempt to decrease the size of the cache
+ * whenever the average hit rate over the last epoch rises
+ * above the value supplied in the upper_hr_threshold
+ * field.
+ *
+ * H5C2_decr__age_out: At the end of each epoch, search the cache for
+ * entries that have not been accessed for at least the number
+ * of epochs specified in the epochs_before_eviction field, and
+ * evict these entries. Conceptually, the maximum cache size
+ * is then decreased to match the new actual cache size. However,
+ * this reduction may be modified by the min_size, the
+ * max_decrement, and/or the empty_reserve.
+ *
+ * H5C2_decr__age_out_with_threshold: Same as age_out, but we only
+ * attempt to reduce the cache size when the hit rate observed
+ * over the last epoch exceeds the value provided in the
+ * upper_hr_threshold field.
+ *
+ * Note that you must set decr_mode to H5C2_decr__off if you
+ * disable metadata cache entry evictions.
+ *
+ * upper_hr_threshold: Upper hit rate threshold. The use of this field
+ * varies according to the current decr_mode:
+ *
+ * H5C2_decr__off or H5C2_decr__age_out: The value of this field is
+ * ignored.
+ *
+ * H5C2_decr__threshold: If the hit rate exceeds this threshold in any
+ * epoch, attempt to decrement the cache size by size_decrement.
+ *
+ * Note that cache size may not be decremented below min_size.
+ *
+ * Note also that if the upper_threshold is 1.0, the cache size
+ * will never be reduced.
+ *
+ * H5C2_decr__age_out_with_threshold: If the hit rate exceeds this
+ * threshold in any epoch, attempt to reduce the cache size
+ * by evicting entries that have not been accessed for more
+ * than the specified number of epochs.
+ *
+ * decrement: This field is only used when the decr_mode is
+ * H5C2_decr__threshold.
+ *
+ * The field is a double containing the multiplier used to derive the
+ * new cache size from the old if a cache size decrement is triggered.
+ * The decrement must be in the range 0.0 (in which case the cache will
+ * try to contract to its minimum size) to 1.0 (in which case the
+ * cache will never shrink).
+ *
+ * apply_max_decrement: Boolean flag used to determine whether decrements
+ * in cache size are to be limited by the max_decrement field.
+ *
+ * max_decrement: Maximum number of bytes by which the cache size can be
+ * decreased in a single re-size. Note that decrements may also be
+ * restricted by the min_size of the cache, and (in age out modes) by
+ * the empty_reserve field.
+ *
+ * epochs_before_eviction: Integer field used in H5C2_decr__age_out and
+ * H5C2_decr__age_out_with_threshold decrement modes.
+ *
+ * This field contains the number of epochs an entry must remain
+ * unaccessed before it is evicted in an attempt to reduce the
+ * cache size. If applicable, this field must lie in the range
+ * [1, H5C2__MAX_EPOCH_MARKERS].
+ *
+ * apply_empty_reserve: Boolean field controlling whether the empty_reserve
+ * field is to be used in computing the new cache size when the
+ * decr_mode is H5C2_decr__age_out or H5C2_decr__age_out_with_threshold.
+ *
+ * empty_reserve: To avoid a constant ratcheting down of cache size by small
+ * amounts in the H5C2_decr__age_out and H5C2_decr__age_out_with_threshold
+ * modes, this field allows one to require that any cache size
+ * reductions leave the specified fraction of unused space in the cache.
+ *
+ * The value of this field must be in the range [0.0, 1.0]. I would
+ * expect typical values to be in the range of 0.01 to 0.1.
+ *
+ *
+ * Parallel Configuration Fields:
+ *
+ * In PHDF5, all operations that modify metadata must be executed collectively.
+ * We used to think that this was enough to ensure consistency across the
+ * metadata caches, but since we allow processes to read metadata individually,
+ * the order of dirty entries in the LRU list can vary across processes,
+ * which can result in inconsistencies between the caches.
+ *
+ * To prevent this, only the metadata cache on process 0 is allowed to write
+ * to file, and then only after synchronizing with the other caches. After
+ * it writes entries to file, it sends the base addresses of the now clean
+ * entries to the other caches, so they can mark these entries clean as well.
+ *
+ * The different caches know when to synchronize caches by counting the
+ * number of bytes of dirty metadata created by the collective operations
+ * modifying metadata. Whenever this count exceeds a user specified
+ * threshold (see below), process 0 flushes down to its minimum clean size,
+ * and then sends the list of newly cleaned entries to the other caches.
+ *
+ * dirty_bytes_threshold: Threshold of dirty byte creation used to
+ * synchronize updates between caches. (See above for outline and
+ * motivation.)
+ *
+ * This value MUST be consistent across all processes accessing the
+ * file. This field is ignored unless HDF5 has been compiled for
+ * parallel.
+ *
+ ****************************************************************************/
+
+#define H5AC2__CURR_CACHE_CONFIG_VERSION 1
+
+typedef struct H5AC2_cache_config_t
+{
+ /* general configuration fields: */
+ int version;
+
+ hbool_t rpt_fcn_enabled;
+
+ hbool_t open_trace_file;
+ hbool_t close_trace_file;
+ char trace_file_name[H5AC2__MAX_TRACE_FILE_NAME_LEN+1];
+
+ hbool_t evictions_enabled;
+
+ hbool_t set_initial_size;
+ size_t initial_size;
+
+ double min_clean_fraction;
+
+ size_t max_size;
+ size_t min_size;
+
+ long int epoch_length;
+
+
+ /* size increase control fields: */
+ enum H5C2_cache_incr_mode incr_mode;
+
+ double lower_hr_threshold;
+
+ double increment;
+
+ hbool_t apply_max_increment;
+ size_t max_increment;
+
+
+ /* size decrease control fields: */
+ enum H5C2_cache_decr_mode decr_mode;
+
+ double upper_hr_threshold;
+
+ double decrement;
+
+ hbool_t apply_max_decrement;
+ size_t max_decrement;
+
+ int epochs_before_eviction;
+
+ hbool_t apply_empty_reserve;
+ double empty_reserve;
+
+
+ /* parallel configuration fields: */
+ int dirty_bytes_threshold;
+
+} H5AC2_cache_config_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/H5C2.c b/src/H5C2.c
new file mode 100644
index 0000000..373191f
--- /dev/null
+++ b/src/H5C2.c
@@ -0,0 +1,11024 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5C2.c
+ * June 1 2004
+ * John Mainzer
+ *
+ * Purpose: Functions in this file implement a generic cache for
+ * things which exist on disk, and which may be
+ * unambiguously referenced by their disk addresses.
+ *
+ * The code in this module was initially written in
+ * support of a complete re-write of the metadata cache
+ * in H5AC.c However, other uses for the cache code
+ * suggested themselves, and thus this file was created
+ * in an attempt to support re-use.
+ *
+ * For a detailed overview of the cache, please see the
+ * header comment for H5C2_t in H5C2pkg.h.
+ *
+ * Modifications:
+ *
+ * QAK - 11/27/2004
+ * Switched over to using skip list routines instead of TBBT
+ * routines.
+ *
+ * JRM - 12/15/04
+ * Added code supporting manual and automatic cache resizing.
+ * See the header for H5C2_auto_size_ctl_t in H5C2private.h for
+ * an overview.
+ *
+ * Some elements of the automatic cache resize code depend on
+ * the LRU list. Thus if we ever choose to support a new
+ * replacement policy, we will either have to disable those
+ * elements of the auto resize code when running the new
+ * policy, or modify them to make use of similar information
+ * maintained by the new policy code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/**************************************************************************
+ *
+ * To Do:
+ *
+ * Code Changes:
+ *
+ * - Remove extra functionality in H5C2_flush_single_entry()?
+ *
+ * - Change protect/unprotect to lock/unlock.
+ *
+ * - Change the way the dirty flag is set. Probably pass it in
+ * as a parameter in unprotect & insert.
+ *
+ * - Size should also be passed in as a parameter in insert and
+ * unprotect -- or some other way should be found to advise the
+ * cache of changes in entry size.
+ *
+ * - Flush entries in increasing address order in
+ * H5C2_make_space_in_cache().
+ *
+ * - Also in H5C2_make_space_in_cache(), use high and low water marks
+ * to reduce the number of I/O calls.
+ *
+ * - When flushing, attempt to combine contiguous entries to reduce
+ * I/O overhead. Can't do this just yet as some entries are not
+ * contiguous. Do this in parallel only or in serial as well?
+ *
+ * - Create MPI type for dirty objects when flushing in parallel.
+ *
+ * - Now that TBBT routines aren't used, fix nodes in memory to
+ * point directly to the skip list node from the LRU list, eliminating
+ * skip list lookups when evicting objects from the cache.
+ *
+ * Tests:
+ *
+ * - Trim execution time. (This is no longer a major issue with the
+ * shift from the TBBT to a hash table for indexing.)
+ *
+ * - Add random tests.
+ *
+ **************************************************************************/
+
+#define H5C2_PACKAGE /*suppress error about including H5C2pkg */
+#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+
+
+#include "H5private.h" /* Generic Functions */
+#include "H5C2pkg.h" /* Cache */
+#include "H5Dprivate.h" /* Dataset functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Pprivate.h" /* Property lists */
+#include "H5SLprivate.h" /* Skip lists */
+
+
+/****************************************************************************
+ *
+ * We maintain doubly linked lists of instances of H5C2_cache_entry_t for a
+ * variety of reasons -- protected list, LRU list, and the clean and dirty
+ * LRU lists at present. The following macros support linking and unlinking
+ * of instances of H5C2_cache_entry_t by both their regular and auxiliary next
+ * and previous pointers.
+ *
+ * The size and length fields are also maintained.
+ *
+ * Note that the relevant pair of prev and next pointers are presumed to be
+ * NULL on entry in the insertion macros.
+ *
+ * Finally, observe that the sanity checking macros evaluate to the empty
+ * string when H5C2_DO_SANITY_CHECKS is FALSE. They also contain calls
+ * to the HGOTO_ERROR macro, which may not be appropriate in all cases.
+ * If so, we will need versions of the insertion and deletion macros which
+ * do not reference the sanity checking macros.
+ * JRM - 5/5/04
+ *
+ * Changes:
+ *
+ * - Removed the line:
+ *
+ * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) ||
+ *
+ * from the H5C2__DLL_PRE_REMOVE_SC macro. With the addition of the
+ * epoch markers used in the age out based cache size reduction algorithm,
+ * this invariant need not hold, as the epoch markers are of size 0.
+ *
+ * One could argue that I should have given the epoch markers a positive
+ * size, but this would break the index_size = LRU_list_size + pl_size
+ * + pel_size invariant.
+ *
+ * Alternatively, I could pass the current decr_mode in to the macro,
+ * and just skip the check whenever epoch markers may be in use.
+ *
+ * However, any size errors should be caught when the cache is flushed
+ * and destroyed. Until we are tracking such an error, this should be
+ * good enough.
+ * JRM - 12/9/04
+ *
+ *
+ * - In the H5C2__DLL_PRE_INSERT_SC macro, replaced the lines:
+ *
+ * ( ( (len) == 1 ) &&
+ * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||
+ * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
+ * )
+ * ) ||
+ *
+ * with:
+ *
+ * ( ( (len) == 1 ) &&
+ * ( ( (head_ptr) != (tail_ptr) ) ||
+ * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
+ * )
+ * ) ||
+ *
+ * Epoch markers have size 0, so we can now have a non-empty list with
+ * zero size. Hence the "( (Size) <= 0 )" clause caused false failures
+ * in the sanity check. Since "Size" is typically a size_t, it can't
+ * take on negative values, and thus the revised clause "( (Size) < 0 )"
+ * caused compiler warnings.
+ * JRM - 12/22/04
+ *
+ * - In the H5C2__DLL_SC macro, replaced the lines:
+ *
+ * ( ( (len) == 1 ) &&
+ * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) ||
+ * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
+ * )
+ * ) ||
+ *
+ * with
+ *
+ * ( ( (len) == 1 ) &&
+ * ( ( (head_ptr) != (tail_ptr) ) ||
+ * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
+ * )
+ * ) ||
+ *
+ * Epoch markers have size 0, so we can now have a non-empty list with
+ * zero size. Hence the "( (Size) <= 0 )" clause caused false failures
+ * in the sanity check. Since "Size" is typically a size_t, it can't
+ * take on negative values, and thus the revised clause "( (Size) < 0 )"
+ * caused compiler warnings.
+ * JRM - 1/10/05
+ *
+ * - Added the H5C2__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
+ * sanity checking macros. These macro are used to update the size of
+ * a DLL when one of its entries changes size.
+ *
+ * JRM - 9/8/05
+ *
+ ****************************************************************************/
+
+#if H5C2_DO_SANITY_CHECKS
+
+#define H5C2__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+if ( ( (head_ptr) == NULL ) || \
+ ( (tail_ptr) == NULL ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (len) <= 0 ) || \
+ ( (Size) < (entry_ptr)->size ) || \
+ ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (len) == 1 ) && \
+ ( ! ( ( (head_ptr) == (entry_ptr) ) && \
+ ( (tail_ptr) == (entry_ptr) ) && \
+ ( (entry_ptr)->next == NULL ) && \
+ ( (entry_ptr)->prev == NULL ) && \
+ ( (Size) == (entry_ptr)->size ) \
+ ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \
+}
+
+#define H5C2__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( (Size) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \
+}
+
+#define H5C2__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+if ( ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->next != NULL ) || \
+ ( (entry_ptr)->prev != NULL ) || \
+ ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \
+}
+
+#define H5C2__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
+if ( ( (dll_len) <= 0 ) || \
+ ( (dll_size) <= 0 ) || \
+ ( (old_size) <= 0 ) || \
+ ( (old_size) > (dll_size) ) || \
+ ( (new_size) <= 0 ) || \
+ ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \
+}
+
+#define H5C2__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
+if ( ( (new_size) > (dll_size) ) || \
+ ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \
+}
+
+#else /* H5C2_DO_SANITY_CHECKS */
+
+#define H5C2__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
+#define H5C2__DLL_SC(head_ptr, tail_ptr, len, Size, fv)
+#define H5C2__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
+#define H5C2__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
+#define H5C2__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+
+#define H5C2__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
+ H5C2__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+ fail_val) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (tail_ptr)->next = (entry_ptr); \
+ (entry_ptr)->prev = (tail_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += (entry_ptr)->size;
+
+#define H5C2__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
+ H5C2__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+ fail_val) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (head_ptr)->prev = (entry_ptr); \
+ (entry_ptr)->next = (head_ptr); \
+ (head_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size;
+
+#define H5C2__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
+ H5C2__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+ fail_val) \
+ { \
+ if ( (head_ptr) == (entry_ptr) ) \
+ { \
+ (head_ptr) = (entry_ptr)->next; \
+ if ( (head_ptr) != NULL ) \
+ { \
+ (head_ptr)->prev = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->prev->next = (entry_ptr)->next; \
+ } \
+ if ( (tail_ptr) == (entry_ptr) ) \
+ { \
+ (tail_ptr) = (entry_ptr)->prev; \
+ if ( (tail_ptr) != NULL ) \
+ { \
+ (tail_ptr)->next = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->next->prev = (entry_ptr)->prev; \
+ } \
+ entry_ptr->next = NULL; \
+ entry_ptr->prev = NULL; \
+ (len)--; \
+ (Size) -= entry_ptr->size; \
+ }
+
+#define H5C2__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \
+ H5C2__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
+ (dll_size) -= (old_size); \
+ (dll_size) += (new_size); \
+ H5C2__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
+
+#if H5C2_DO_SANITY_CHECKS
+
+#define H5C2__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
+if ( ( (hd_ptr) == NULL ) || \
+ ( (tail_ptr) == NULL ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (len) <= 0 ) || \
+ ( (Size) < (entry_ptr)->size ) || \
+ ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
+ ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (len) == 1 ) && \
+ ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
+ ( (entry_ptr)->aux_next == NULL ) && \
+ ( (entry_ptr)->aux_prev == NULL ) && \
+ ( (Size) == (entry_ptr)->size ) \
+ ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \
+}
+
+#define H5C2__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( (Size) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \
+}
+
+#define H5C2__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
+if ( ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->aux_next != NULL ) || \
+ ( (entry_ptr)->aux_prev != NULL ) || \
+ ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (hd_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \
+}
+
+#else /* H5C2_DO_SANITY_CHECKS */
+
+#define H5C2__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
+#define H5C2__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv)
+#define H5C2__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+
+#define H5C2__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\
+ H5C2__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+ fail_val) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (tail_ptr)->aux_next = (entry_ptr); \
+ (entry_ptr)->aux_prev = (tail_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size;
+
+/* Prepend entry_ptr at the head of the auxiliary (clean / dirty LRU) doubly
+ * linked list, then update the list length and size.  The final statement
+ * now parenthesizes (entry_ptr), matching the rest of the macro.
+ */
+#define H5C2__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+        H5C2__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+                                    fv) \
+        if ( (head_ptr) == NULL ) \
+        { \
+           (head_ptr) = (entry_ptr); \
+           (tail_ptr) = (entry_ptr); \
+        } \
+        else \
+        { \
+           (head_ptr)->aux_prev = (entry_ptr); \
+           (entry_ptr)->aux_next = (head_ptr); \
+           (head_ptr) = (entry_ptr); \
+        } \
+        (len)++; \
+        (Size) += (entry_ptr)->size;
+
+/* Unlink entry_ptr from the auxiliary (clean / dirty LRU) doubly linked
+ * list, NULL its aux link fields, and update the list length and size.
+ * The three previously unparenthesized uses of entry_ptr are now
+ * parenthesized for consistency with the rest of the macro and safety
+ * with non-trivial argument expressions.
+ */
+#define H5C2__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+        H5C2__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+                                    fv) \
+        { \
+           if ( (head_ptr) == (entry_ptr) ) \
+           { \
+              (head_ptr) = (entry_ptr)->aux_next; \
+              if ( (head_ptr) != NULL ) \
+              { \
+                 (head_ptr)->aux_prev = NULL; \
+              } \
+           } \
+           else \
+           { \
+              (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \
+           } \
+           if ( (tail_ptr) == (entry_ptr) ) \
+           { \
+              (tail_ptr) = (entry_ptr)->aux_prev; \
+              if ( (tail_ptr) != NULL ) \
+              { \
+                 (tail_ptr)->aux_next = NULL; \
+              } \
+           } \
+           else \
+           { \
+              (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \
+           } \
+           (entry_ptr)->aux_next = NULL; \
+           (entry_ptr)->aux_prev = NULL; \
+           (len)--; \
+           (Size) -= (entry_ptr)->size; \
+        }
+
+
+/***********************************************************************
+ *
+ * Stats collection macros
+ *
+ * The following macros must handle stats collection when this collection
+ * is enabled, and evaluate to the empty string when it is not.
+ *
+ * The sole exception to this rule is
+ * H5C2__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as
+ * the cache hit rate stats are always collected and available.
+ *
+ * Changes:
+ *
+ * JRM -- 3/21/06
+ * Added / updated macros for pinned entry related stats.
+ *
+ * JRM -- 8/9/06
+ * More pinned entry stats related updates.
+ *
+ * JRM -- 3/31/07
+ * Updated H5C2__UPDATE_STATS_FOR_PROTECT() to keep stats on
+ * read and write protects.
+ *
+ ***********************************************************************/
+
+/* Update the cache hit rate statistics.  Unlike the other stats macros,
+ * this one is always active, as cache hit rate stats are always collected.
+ *
+ * Fixes relative to the previous version: the stray trailing backslash
+ * after the closing brace has been removed (it silently continued the
+ * macro onto whatever source line followed it), and cache_ptr is now
+ * parenthesized so the macro is safe for non-trivial argument expressions.
+ *
+ * NOTE(review): the unguarded if-statement still makes this macro unsafe
+ * inside an if/else at the call site -- verify all call sites, or wrap
+ * in do { ... } while(0) in a follow-up.
+ */
+#define H5C2__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
+        ((cache_ptr)->cache_accesses)++; \
+        if ( hit ) { \
+            ((cache_ptr)->cache_hits)++; \
+        }
+
+#if H5C2_COLLECT_CACHE_STATS
+
+#define H5C2__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \
+ (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++;
+
+#define H5C2__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \
+ if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
+ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
+ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
+
+#define H5C2__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr) \
+ if ( cache_ptr->flush_in_progress ) { \
+ ((cache_ptr)->cache_flush_renames[(entry_ptr)->type->id])++; \
+ } \
+ if ( entry_ptr->flush_in_progress ) { \
+ ((cache_ptr)->entry_flush_renames[(entry_ptr)->type->id])++; \
+ } \
+ (((cache_ptr)->renames)[(entry_ptr)->type->id])++;
+
+#define H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
+ if ( cache_ptr->flush_in_progress ) { \
+ ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \
+ } \
+ if ( entry_ptr->flush_in_progress ) { \
+ ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \
+ } \
+ if ( (entry_ptr)->size < (new_size) ) { \
+ ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+ } else if ( (entry_ptr)->size > (new_size) ) { \
+ ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \
+ }
+
+#define H5C2__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+ (cache_ptr)->total_ht_insertions++;
+
+#define H5C2__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
+ (cache_ptr)->total_ht_deletions++;
+
+#define H5C2__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \
+ if ( success ) { \
+ (cache_ptr)->successful_ht_searches++; \
+ (cache_ptr)->total_successful_ht_search_depth += depth; \
+ } else { \
+ (cache_ptr)->failed_ht_searches++; \
+ (cache_ptr)->total_failed_ht_search_depth += depth; \
+ }
+
+#define H5C2__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
+ ((cache_ptr)->unpins)[(entry_ptr)->type->id]++;
+
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+
+#define H5C2__RESET_CACHE_ENTRY_STATS(entry_ptr) \
+ (entry_ptr)->accesses = 0; \
+ (entry_ptr)->clears = 0; \
+ (entry_ptr)->flushes = 0; \
+ (entry_ptr)->pins = 0;
+
+#define H5C2__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+ (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \
+ } \
+ ((entry_ptr)->clears)++;
+
+#define H5C2__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+ (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
+ } \
+ ((entry_ptr)->flushes)++;
+
+#define H5C2__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->accesses > \
+ ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] \
+ = (entry_ptr)->accesses; \
+ } \
+ if ( (entry_ptr)->accesses < \
+ ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] \
+ = (entry_ptr)->accesses; \
+ } \
+ if ( (entry_ptr)->clears > \
+ ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \
+ = (entry_ptr)->clears; \
+ } \
+ if ( (entry_ptr)->flushes > \
+ ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \
+ = (entry_ptr)->flushes; \
+ } \
+ if ( (entry_ptr)->size > \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
+ = (entry_ptr)->size; \
+ } \
+ if ( (entry_ptr)->pins > \
+ ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \
+ = (entry_ptr)->pins; \
+ }
+
+#define H5C2__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
+ ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
+ (entry_ptr)->pins++; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+ } \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
+ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
+ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ if ( (entry_ptr)->size > \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
+ = (entry_ptr)->size; \
+ }
+
+#define H5C2__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+ if ( hit ) \
+ ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
+ else \
+ ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
+ if ( ! ((entry_ptr)->is_read_only) ) { \
+ ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
+ } else { \
+ ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
+ if ( ((entry_ptr)->ro_ref_count) > \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
+ ((entry_ptr)->ro_ref_count); \
+ } \
+ } \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+ if ( (entry_ptr)->size > \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
+ = (entry_ptr)->size; \
+ } \
+ ((entry_ptr)->accesses)++;
+
+#define H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
+ ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
+ (entry_ptr)->pins++; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
+
+#else /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+#define H5C2__RESET_CACHE_ENTRY_STATS(entry_ptr)
+
+#define H5C2__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \
+ } \
+ (((cache_ptr)->clears)[(entry_ptr)->type->id])++;
+
+#define H5C2__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+ (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
+ }
+
+#define H5C2__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++;
+
+#define H5C2__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->is_pinned ) { \
+ (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
+ ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+ } \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
+ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
+ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size;
+
+#define H5C2__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+ if ( hit ) \
+ ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
+ else \
+ ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
+ if ( ! ((entry_ptr)->is_read_only) ) { \
+ ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
+ } else { \
+ ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
+ if ( ((entry_ptr)->ro_ref_count) > \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
+ ((entry_ptr)->ro_ref_count); \
+ } \
+ } \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;
+
+#define H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
+ ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
+
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+#else /* H5C2_COLLECT_CACHE_STATS */
+
+#define H5C2__RESET_CACHE_ENTRY_STATS(entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
+#define H5C2__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
+#define H5C2__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
+#define H5C2__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
+#define H5C2__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)
+#define H5C2__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+#define H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+#define H5C2__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+
+/***********************************************************************
+ *
+ * Hash table access and manipulation macros:
+ *
+ * The following macros handle searches, insertions, and deletion in
+ * the hash table.
+ *
+ * When modifying these macros, remember to modify the similar macros
+ * in test/cache.c
+ *
+ ***********************************************************************/
+
+/* H5C2__HASH_TABLE_LEN is defined in H5C2pkg.h. It must be a power of two. */
+
+#define H5C2__HASH_MASK ((size_t)(H5C2__HASH_TABLE_LEN - 1) << 3)
+
+#define H5C2__HASH_FCN(x) (int)(((x) & H5C2__HASH_MASK) >> 3)
+
+#if H5C2_DO_SANITY_CHECKS
+
+#define H5C2__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( (k = H5C2__HASH_FCN((entry_ptr)->addr)) < 0 ) || \
+ ( k >= H5C2__HASH_TABLE_LEN ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Pre HT insert SC failed") \
+}
+
+#define H5C2__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) || \
+ ( (cache_ptr)->index_len < 1 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( H5C2__HASH_FCN((entry_ptr)->addr) < 0 ) || \
+ ( H5C2__HASH_FCN((entry_ptr)->addr) >= H5C2__HASH_TABLE_LEN ) || \
+ ( ((cache_ptr)->index)[(H5C2__HASH_FCN((entry_ptr)->addr))] \
+ == NULL ) || \
+ ( ( ((cache_ptr)->index)[(H5C2__HASH_FCN((entry_ptr)->addr))] \
+ != (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev == NULL ) ) || \
+ ( ( ((cache_ptr)->index)[(H5C2__HASH_FCN((entry_ptr)->addr))] == \
+ (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev != NULL ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
+}
+
+#define H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) || \
+ ( ! H5F_addr_defined(Addr) ) || \
+ ( H5C2__HASH_FCN(Addr) < 0 ) || \
+ ( H5C2__HASH_FCN(Addr) >= H5C2__HASH_TABLE_LEN ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \
+}
+
+#define H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) || \
+ ( (cache_ptr)->index_len < 1 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( ((cache_ptr)->index)[k] == NULL ) || \
+ ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev == NULL ) ) || \
+ ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev != NULL ) ) || \
+ ( ( (entry_ptr)->ht_prev != NULL ) && \
+ ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->ht_next != NULL ) && \
+ ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post successful HT search SC failed") \
+}
+
+#define H5C2__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
+ ( (entry_ptr)->ht_prev != NULL ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post HT shift to front SC failed") \
+}
+
+/* Sanity check run before adjusting cache_ptr->index_size for a change in
+ * an entry's size.  The previous version tested ( (new_size) <= 0 ) twice;
+ * the second occurrence is corrected to test old_size, which was otherwise
+ * never validated against zero.
+ */
+#define H5C2__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+if ( ( (cache_ptr) == NULL ) || \
+     ( (cache_ptr)->index_len <= 0 ) || \
+     ( (cache_ptr)->index_size <= 0 ) || \
+     ( (new_size) <= 0 ) || \
+     ( (old_size) > (cache_ptr)->index_size ) || \
+     ( (old_size) <= 0 ) || \
+     ( ( (cache_ptr)->index_len == 1 ) && \
+       ( (cache_ptr)->index_size != (old_size) ) ) ) { \
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                "Pre HT entry size change SC failed") \
+}
+
+#define H5C2__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (cache_ptr)->index_size <= 0 ) || \
+ ( (new_size) > (cache_ptr)->index_size ) || \
+ ( ( (cache_ptr)->index_len == 1 ) && \
+ ( (cache_ptr)->index_size != (new_size) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Post HT entry size change SC failed") \
+}
+
+#else /* H5C2_DO_SANITY_CHECKS */
+
+#define H5C2__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
+#define H5C2__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
+#define H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
+#define H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
+#define H5C2__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
+#define H5C2__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size)
+#define H5C2__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size)
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+
+#define H5C2__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C2__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+ k = H5C2__HASH_FCN((entry_ptr)->addr); \
+ if ( ((cache_ptr)->index)[k] == NULL ) \
+ { \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ } \
+ else \
+ { \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ } \
+ (cache_ptr)->index_len++; \
+ (cache_ptr)->index_size += (entry_ptr)->size; \
+ H5C2__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+}
+
+#define H5C2__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
+{ \
+ int k; \
+ H5C2__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+ k = H5C2__HASH_FCN((entry_ptr)->addr); \
+ if ( (entry_ptr)->ht_next ) \
+ { \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ } \
+ if ( (entry_ptr)->ht_prev ) \
+ { \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ } \
+ if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \
+ { \
+ ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
+ } \
+ (entry_ptr)->ht_next = NULL; \
+ (entry_ptr)->ht_prev = NULL; \
+ (cache_ptr)->index_len--; \
+ (cache_ptr)->index_size -= (entry_ptr)->size; \
+ H5C2__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
+}
+
+#define H5C2__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ int depth = 0; \
+ H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+ k = H5C2__HASH_FCN(Addr); \
+ entry_ptr = ((cache_ptr)->index)[k]; \
+ while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
+ { \
+ (entry_ptr) = (entry_ptr)->ht_next; \
+ (depth)++; \
+ } \
+ if ( entry_ptr ) \
+ { \
+ H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) \
+ { \
+ if ( (entry_ptr)->ht_next ) \
+ { \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ } \
+ HDassert( (entry_ptr)->ht_prev != NULL ); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C2__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ } \
+ H5C2__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
+}
+
+/* Search the hash table for the entry at address Addr and, on a hit, move
+ * it to the front of its hash bucket chain.  Identical to
+ * H5C2__SEARCH_INDEX except that no hash table search statistics are
+ * collected -- accordingly, the dead "depth" counter that the previous
+ * version declared and incremented but never read has been removed.
+ */
+#define H5C2__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
+{ \
+    int k; \
+    H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+    k = H5C2__HASH_FCN(Addr); \
+    entry_ptr = ((cache_ptr)->index)[k]; \
+    while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
+    { \
+        (entry_ptr) = (entry_ptr)->ht_next; \
+    } \
+    if ( entry_ptr ) \
+    { \
+        H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
+        if ( entry_ptr != ((cache_ptr)->index)[k] ) \
+        { \
+            if ( (entry_ptr)->ht_next ) \
+            { \
+                (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+            } \
+            HDassert( (entry_ptr)->ht_prev != NULL ); \
+            (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+            ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+            (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+            (entry_ptr)->ht_prev = NULL; \
+            ((cache_ptr)->index)[k] = (entry_ptr); \
+            H5C2__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+        } \
+    } \
+}
+
+#define H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+ H5C2__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+ (cache_ptr)->index_size -= old_size; \
+ (cache_ptr)->index_size += new_size; \
+ H5C2__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+}
+
+
+/**************************************************************************
+ *
+ * Skip list insertion and deletion macros:
+ *
+ * These used to be functions, but I converted them to macros to avoid some
+ * function call overhead.
+ *
+ **************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__INSERT_ENTRY_IN_SLIST
+ *
+ * Purpose: Insert the specified instance of H5C2_cache_entry_t into
+ * the skip list in the specified instance of H5C2_t. Update
+ * the associated length and size fields.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function to set the in_tree flag when inserting
+ * an entry into the tree. Also modified the function to
+ * update the tree size and len fields instead of the similar
+ * index fields.
+ *
+ * All of this is part of the modifications to support the
+ * hash table.
+ *
+ * JRM -- 7/27/04
+ * Converted the function H5C2_insert_entry_in_tree() into
+ * the macro H5C2__INSERT_ENTRY_IN_TREE in the hopes of
+ * wringing a little more speed out of the cache.
+ *
+ * Note that we don't bother to check if the entry is already
+ * in the tree -- if it is, H5SL_insert() will fail.
+ *
+ * QAK -- 11/27/04
+ * Switched over to using skip list routines.
+ *
+ * JRM -- 6/27/06
+ * Added fail_val parameter.
+ *
+ * JRM -- 8/25/06
+ * Added the H5C2_DO_SANITY_CHECKS version of the macro.
+ *
+ * This version maintains the slist_len_increase and
+ * slist_size_increase fields that are used in sanity
+ * checks in the flush routines.
+ *
+ * All this is needed as the fractal heap needs to be
+ * able to dirty, resize and/or rename entries during the
+ * flush.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_DO_SANITY_CHECKS
+
+#define H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
+ < 0 ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "Can't insert entry in skip list") \
+ \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ (cache_ptr)->slist_len_increase++; \
+ (cache_ptr)->slist_size_increase += (entry_ptr)->size; \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
+ \
+} /* H5C2__INSERT_ENTRY_IN_SLIST */
+
+#else /* H5C2_DO_SANITY_CHECKS */
+
+#define H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
+ < 0 ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "Can't insert entry in skip list") \
+ \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
+ \
+} /* H5C2__INSERT_ENTRY_IN_SLIST */
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__REMOVE_ENTRY_FROM_SLIST
+ *
+ * Purpose: Remove the specified instance of H5C2_cache_entry_t from the
+ * index skip list in the specified instance of H5C2_t. Update
+ * the associated length and size fields.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM - 7/27/04
+ * Converted from the function H5C2_remove_entry_from_tree()
+ * to the macro H5C2__REMOVE_ENTRY_FROM_TREE in the hopes of
+ * wringing a little more performance out of the cache.
+ *
+ * QAK -- 11/27/04
+ * Switched over to using skip list routines.
+ *
+ * JRM -- 3/28/07
+ * Updated sanity checks for the new is_read_only and
+ * ro_ref_count fields in H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define H5C2__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "Can't delete entry from skip list.") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ (entry_ptr)->in_slist = FALSE; \
+} /* H5C2__REMOVE_ENTRY_FROM_SLIST */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE
+ *
+ * Purpose: Update cache_ptr->slist_size for a change in the size of
+ * and entry in the slist.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 9/07/05
+ *
+ * Modifications:
+ *
+ * JRM -- 8/27/06
+ * Added the H5C2_DO_SANITY_CHECKS version of the macro.
+ *
+ * This version maintains the slist_size_increase field
+ * that are used in sanity checks in the flush routines.
+ *
+ * All this is needed as the fractal heap needs to be
+ * able to dirty, resize and/or rename entries during the
+ * flush.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_DO_SANITY_CHECKS
+
+/* Sanity-checking version: also maintains slist_size_increase, which the
+ * flush routines use for consistency checks.
+ */
+#define H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+    HDassert( (cache_ptr) ); \
+    HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+    HDassert( (old_size) > 0 ); \
+    HDassert( (new_size) > 0 ); \
+    HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+    HDassert( (cache_ptr)->slist_len > 0 ); \
+    HDassert( ((cache_ptr)->slist_len > 1) || \
+              ( (cache_ptr)->slist_size == (old_size) ) ); \
+ \
+    (cache_ptr)->slist_size -= (old_size); \
+    (cache_ptr)->slist_size += (new_size); \
+ \
+    (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
+    (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ \
+    HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+    HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+              ( (cache_ptr)->slist_size == (new_size) ) ); \
+} /* H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE */
+
+#else /* H5C2_DO_SANITY_CHECKS */
+
+/* Non-sanity-checking version: adjusts slist_size only. */
+#define H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+    HDassert( (cache_ptr) ); \
+    HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+    HDassert( (old_size) > 0 ); \
+    HDassert( (new_size) > 0 ); \
+    HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+    HDassert( (cache_ptr)->slist_len > 0 ); \
+    HDassert( ((cache_ptr)->slist_len > 1) || \
+              ( (cache_ptr)->slist_size == (old_size) ) ); \
+ \
+    (cache_ptr)->slist_size -= (old_size); \
+    (cache_ptr)->slist_size += (new_size); \
+ \
+    HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+    HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+              ( (cache_ptr)->slist_size == (new_size) ) ); \
+} /* H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE */
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+
+/**************************************************************************
+ *
+ * Replacement policy update macros:
+ *
+ * These used to be functions, but I converted them to macros to avoid some
+ * function call overhead.
+ *
+ **************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS
+ *
+ * Purpose: For efficiency, we sometimes change the order of flushes --
+ * but doing so can confuse the replacement policy. This
+ * macro exists to allow us to specify an entry as the
+ * most recently touched so we can repair any such
+ * confusion.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the macro
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 10/13/05
+ *
+ * Modifications:
+ *
+ * JRM -- 3/20/06
+ * Modified macro to ignore pinned entries. Pinned entries
+ * do not appear in the data structures maintained by the
+ * replacement policy code, and thus this macro has nothing
+ * to do if called for such an entry.
+ *
+ * JRM -- 3/28/07
+ * Added sanity checks using the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists:  the entry is moved to the head of the main LRU list and
+ * to the head of whichever auxiliary list (clean or dirty) it is on.
+ */
+#define H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the head.\
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* Use the dirty flag to infer whether the entry is on the clean or \
+ * dirty LRU list, and remove it. Then insert it at the head of \
+ * the same LRU list. \
+ * \
+ * At least initially, all entries should be clean. That may \
+ * change, so we may as well deal with both cases now. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the main LRU list needs to be updated.
+ */
+#define H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the head \
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_EVICTION
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * eviction of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_eviction() to the
+ * macro H5C2__UPDATE_RP_FOR_EVICTION in an effort to squeeze
+ * a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * the pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 3/20/06
+ * Pinned entries can't be evicted, so this macro should never
+ * be called on a pinned entry. Added assert to verify this.
+ *
+ * JRM -- 3/28/07
+ * Added sanity checks for the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists:  the evicted entry is removed from the main LRU list and
+ * from whichever auxiliary list (clean or dirty) it resides on.
+ */
+#define H5C2__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( !((entry_ptr)->is_pinned) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list. */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* If the entry is clean when it is evicted, it should be on the \
+ * clean LRU list, if it was dirty, it should be on the dirty LRU list. \
+ * Remove it from the appropriate list according to the value of the \
+ * dirty flag. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+} /* H5C2__UPDATE_RP_FOR_EVICTION */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the main LRU list needs to be updated.
+ */
+#define H5C2__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( !((entry_ptr)->is_pinned) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list. */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+} /* H5C2__UPDATE_RP_FOR_EVICTION */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_FLUSH
+ *
+ * Purpose: Update the replacement policy data structures for a flush
+ * of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/6/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_flush() to the
+ * macro H5C2__UPDATE_RP_FOR_FLUSH in an effort to squeeze
+ * a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 3/20/06
+ * While pinned entries can be flushed, they don't reside in
+ * the replacement policy data structures when unprotected.
+ * Thus I modified this macro to do nothing if the entry is
+ * pinned.
+ *
+ * JRM - 3/28/07
+ * Added sanity checks based on the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists.  Note that the entry is deliberately prepended to the
+ * clean LRU list regardless of its current dirty state -- see the
+ * comment in the macro body.
+ */
+#define H5C2__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the \
+ * head. \
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* since the entry is being flushed or cleared, one would think \
+ * that it must be dirty -- but that need not be the case. Use the \
+ * dirty flag to infer whether the entry is on the clean or dirty \
+ * LRU list, and remove it. Then insert it at the head of the \
+ * clean LRU list. \
+ * \
+ * The function presumes that a dirty entry will be either cleared \
+ * or flushed shortly, so it is OK if we put a dirty entry on the \
+ * clean LRU list. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_FLUSH */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the main LRU list needs to be updated.
+ */
+#define H5C2__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the \
+ * head. \
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_FLUSH */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_INSERTION
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * insertion of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_insertion() to the
+ * macro H5C2__UPDATE_RP_FOR_INSERTION in an effort to squeeze
+ * a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 3/10/06
+ * This macro should never be called on a pinned entry.
+ * Inserted an assert to verify this.
+ *
+ * JRM - 8/9/06
+ * Not any more. We must now allow insertion of pinned
+ * entries. Updated macro to support this.
+ *
+ * JRM - 3/28/07
+ * Added sanity checks using the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists.  Pinned entries go on the pinned entry list; all others
+ * are prepended to the main LRU list and to the clean or dirty LRU
+ * list as indicated by the entry's dirty flag.
+ */
+#define H5C2__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the head of the LRU list. */ \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* insert the entry at the head of the clean or dirty LRU list as \
+ * appropriate. \
+ */ \
+ \
+ /* parenthesize the macro parameter, as is done everywhere else in \
+ * this file, so the macro expands safely for any argument expression. \
+ */ \
+ if ( (entry_ptr)->is_dirty ) { \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_INSERTION */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * pinned entries go on the pinned entry list, all others are
+ * prepended to the main LRU list only.
+ */
+#define H5C2__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the head of the LRU list. */ \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_INSERTION */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_PROTECT
+ *
+ * Purpose: Update the replacement policy data structures for a
+ * protect of the specified cache entry.
+ *
+ * To do this, unlink the specified entry from any data
+ * structures used by the replacement policy, and add the
+ * entry to the protected list.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_protect() to the
+ * macro H5C2__UPDATE_RP_FOR_PROTECT in an effort to squeeze
+ * a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 3/17/06
+ * Modified macro to attempt to remove pinned entries from
+ * the pinned entry list instead of from the data structures
+ * maintained by the replacement policy.
+ *
+ * JRM - 3/28/07
+ * Added sanity checks based on the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists.  The entry is removed from the pinned entry list or from
+ * the replacement policy lists, then appended to the protected list.
+ */
+#define H5C2__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list. */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* Similarly, remove the entry from the clean or dirty LRU list \
+ * as appropriate. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ } else { \
+ \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+ \
+ /* Regardless of the replacement policy, or whether the entry is \
+ * pinned, now add the entry to the protected list. \
+ */ \
+ \
+ H5C2__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
+ (cache_ptr)->pl_tail_ptr, \
+ (cache_ptr)->pl_len, \
+ (cache_ptr)->pl_size, (fail_val)) \
+} /* H5C2__UPDATE_RP_FOR_PROTECT */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the main LRU list (or pinned entry list) and the protected
+ * list need to be updated.
+ */
+#define H5C2__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list. */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+ \
+ /* Regardless of the replacement policy, or whether the entry is \
+ * pinned, now add the entry to the protected list. \
+ */ \
+ \
+ H5C2__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
+ (cache_ptr)->pl_tail_ptr, \
+ (cache_ptr)->pl_len, \
+ (cache_ptr)->pl_size, (fail_val)) \
+} /* H5C2__UPDATE_RP_FOR_PROTECT */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_RENAME
+ *
+ * Purpose: Update the replacement policy data structures for a
+ * rename of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_rename() to the
+ * macro H5C2__UPDATE_RP_FOR_RENAME in an effort to squeeze
+ * a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 6/23/05
+ * Added the was_dirty parameter. It is possible that
+ * the entry was clean when it was renamed -- if so
+ * it is in the clean LRU regardless of the current
+ * value of the is_dirty field.
+ *
+ * At present, all renamed entries are forced to be
+ * dirty. This macro is a bit more general than that,
+ * to allow it to function correctly should that policy
+ * be relaxed in the future.
+ *
+ * JRM - 3/17/06
+ * Modified macro to do nothing if the entry is pinned.
+ * In this case, the entry is on the pinned entry list, not
+ * in the replacement policy data structures, so there is
+ * nothing to be done.
+ *
+ * JRM - 3/28/07
+ * Added sanity checks using the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists.  was_dirty selects which auxiliary list the entry is
+ * removed from; is_dirty selects which it is re-inserted on.
+ */
+#define H5C2__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the head. \
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* remove the entry from either the clean or dirty LRU list as \
+ * indicated by the was_dirty parameter \
+ */ \
+ if ( was_dirty ) { \
+ \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ } else { \
+ \
+ H5C2__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* insert the entry at the head of either the clean or dirty LRU \
+ * list as appropriate. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ } else { \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_RENAME */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the main LRU list needs to be updated, so was_dirty is unused.
+ */
+#define H5C2__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( ! ((entry_ptr)->is_pinned) ) { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* remove the entry from the LRU list, and re-insert it at the head. \
+ */ \
+ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+} /* H5C2__UPDATE_RP_FOR_RENAME */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_SIZE_CHANGE
+ *
+ * Purpose: Update the replacement policy data structures for a
+ * size change of the specified cache entry.
+ *
+ * To do this, determine if the entry is pinned. If it is,
+ * update the size of the pinned entry list.
+ *
+ * If it isn't pinned, the entry must be handled by the
+ * replacement policy. Update the appropriate replacement
+ * policy data structures.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 8/23/06
+ *
+ * Modifications:
+ *
+ * JRM -- 3/28/07
+ * Added sanity checks based on the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists:  the size of the pinned entry list, or of the main LRU
+ * list plus the appropriate auxiliary list, is adjusted.
+ */
+#define H5C2__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( new_size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* Update the size of the LRU list */ \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
+ /* Similarly, update the size of the clean or dirty LRU list as \
+ * appropriate. At present, the entry must be clean, but that \
+ * could change. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
+ } else { \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+ \
+} /* H5C2__UPDATE_RP_FOR_SIZE_CHANGE */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * only the pinned entry list or main LRU list size is adjusted.
+ */
+#define H5C2__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( new_size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* Update the size of the LRU list */ \
+ \
+ H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+ \
+} /* H5C2__UPDATE_RP_FOR_SIZE_CHANGE */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_UNPIN
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * unpin of the specified cache entry.
+ *
+ * To do this, unlink the specified entry from the protected
+ * entry list, and re-insert it in the data structures used
+ * by the current replacement policy.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the macro
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 3/22/06
+ *
+ * Modifications:
+ *
+ * JRM -- 3/28/07
+ * Added sanity checks based on the new is_read_only and
+ * ro_ref_count fields of struct H5C2_cache_entry_t.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Variant for builds that also maintain the auxiliary clean and dirty
+ * LRU lists:  the unpinned entry moves from the pinned entry list to
+ * the head of the main LRU list and of the clean or dirty LRU list.
+ */
+#define H5C2__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->is_pinned); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ /* Regardless of the replacement policy, remove the entry from the \
+ * pinned entry list. \
+ */ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the head of the LRU list. */ \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* Similarly, insert the entry at the head of either the clean or \
+ * dirty LRU list as appropriate. \
+ */ \
+ \
+ if ( (entry_ptr)->is_dirty ) { \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ } else { \
+ \
+ H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ \
+} /* H5C2__UPDATE_RP_FOR_UNPIN */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Variant for builds without the auxiliary clean / dirty LRU lists:
+ * the unpinned entry moves from the pinned entry list to the head of
+ * the main LRU list only.
+ */
+#define H5C2__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->is_pinned); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ /* Regardless of the replacement policy, remove the entry from the \
+ * pinned entry list. \
+ */ \
+ H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the head of the LRU list. */ \
+ \
+ H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ \
+} /* H5C2__UPDATE_RP_FOR_UNPIN */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C2__UPDATE_RP_FOR_UNPROTECT
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * unprotect of the specified cache entry.
+ *
+ * To do this, unlink the specified entry from the protected
+ * list, and re-insert it in the data structures used by the
+ * current replacement policy.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 5/19/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/27/04
+ * Converted the function H5C2_update_rp_for_unprotect() to
+ * the macro H5C2__UPDATE_RP_FOR_UNPROTECT in an effort to
+ * squeeze a bit more performance out of the cache.
+ *
+ * At least for the first cut, I am leaving the comments and
+ * white space in the macro. If they cause difficulties with
+ * pre-processor, I'll have to remove them.
+ *
+ * JRM - 7/28/04
+ * Split macro into two versions, one supporting the clean and
+ * dirty LRU lists, and the other not. Yet another attempt
+ * at optimization.
+ *
+ * JRM - 3/17/06
+ * Modified macro to put pinned entries on the pinned entry
+ * list instead of inserting them in the data structures
+ * maintained by the replacement policy.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+/* Version of H5C2__UPDATE_RP_FOR_UNPROTECT used when the auxiliary clean
+ * and dirty LRU lists are maintained.  The entry leaves the protected
+ * list; pinned entries go to the pinned entry list, all others go to the
+ * head of the LRU list and of the matching clean/dirty LRU list.
+ */
+#define H5C2__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
+{ \
+    HDassert( (cache_ptr) ); \
+    HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+    HDassert( (entry_ptr) ); \
+    HDassert( (entry_ptr)->is_protected); \
+    HDassert( (entry_ptr)->size > 0 ); \
+ \
+    /* Regardless of the replacement policy, remove the entry from the \
+     * protected list. \
+     */ \
+    H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \
+                     (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
+                     (cache_ptr)->pl_size, (fail_val)) \
+ \
+    if ( (entry_ptr)->is_pinned ) { \
+ \
+        H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+                          (cache_ptr)->pel_tail_ptr, \
+                          (cache_ptr)->pel_len, \
+                          (cache_ptr)->pel_size, (fail_val)) \
+ \
+    } else { \
+ \
+        /* modified LRU specific code */ \
+ \
+        /* insert the entry at the head of the LRU list. */ \
+ \
+        H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+                          (cache_ptr)->LRU_tail_ptr, \
+                          (cache_ptr)->LRU_list_len, \
+                          (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+        /* Similarly, insert the entry at the head of either the clean or \
+         * dirty LRU list as appropriate. \
+         */ \
+ \
+        if ( (entry_ptr)->is_dirty ) { \
+ \
+            H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+                                  (cache_ptr)->dLRU_tail_ptr, \
+                                  (cache_ptr)->dLRU_list_len, \
+                                  (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+        } else { \
+ \
+            H5C2__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+                                  (cache_ptr)->cLRU_tail_ptr, \
+                                  (cache_ptr)->cLRU_list_len, \
+                                  (cache_ptr)->cLRU_list_size, (fail_val)) \
+        } \
+ \
+        /* End modified LRU specific code. */ \
+    } \
+ \
+} /* H5C2__UPDATE_RP_FOR_UNPROTECT */
+
+#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/* Version of H5C2__UPDATE_RP_FOR_UNPROTECT used when the auxiliary clean
+ * and dirty LRU lists are compiled out.  The entry leaves the protected
+ * list; pinned entries go to the pinned entry list, all others go to the
+ * head of the (single) LRU list.
+ */
+#define H5C2__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
+{ \
+    HDassert( (cache_ptr) ); \
+    HDassert( (cache_ptr)->magic == H5C2__H5C2_T_MAGIC ); \
+    HDassert( (entry_ptr) ); \
+    HDassert( (entry_ptr)->is_protected); \
+    HDassert( (entry_ptr)->size > 0 ); \
+ \
+    /* Regardless of the replacement policy, remove the entry from the \
+     * protected list. \
+     */ \
+    H5C2__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \
+                     (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
+                     (cache_ptr)->pl_size, (fail_val)) \
+ \
+    if ( (entry_ptr)->is_pinned ) { \
+ \
+        H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+                          (cache_ptr)->pel_tail_ptr, \
+                          (cache_ptr)->pel_len, \
+                          (cache_ptr)->pel_size, (fail_val)) \
+ \
+    } else { \
+ \
+        /* modified LRU specific code */ \
+ \
+        /* insert the entry at the head of the LRU list. */ \
+ \
+        H5C2__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+                          (cache_ptr)->LRU_tail_ptr, \
+                          (cache_ptr)->LRU_list_len, \
+                          (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+        /* End modified LRU specific code. */ \
+    } \
+} /* H5C2__UPDATE_RP_FOR_UNPROTECT */
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+
+/*
+ * Private file-scope variables.
+ */
+
+/* Declare a free list to manage the H5C2_t struct */
+H5FL_DEFINE_STATIC(H5C2_t);
+
+
+/*
+ * Private file-scope function declarations:
+ */
+
+static herr_t H5C2__auto_adjust_cache_size(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ hbool_t write_permitted);
+
+static herr_t H5C2__autoadjust__ageout(H5C2_t * cache_ptr,
+ double hit_rate,
+ enum H5C2_resize_status * status_ptr,
+ size_t * new_max_cache_size_ptr,
+ hid_t dxpl_id,
+ hbool_t write_permitted);
+
+static herr_t H5C2__autoadjust__ageout__cycle_epoch_marker(H5C2_t * cache_ptr);
+
+static herr_t H5C2__autoadjust__ageout__evict_aged_out_entries(hid_t dxpl_id,
+ H5C2_t * cache_ptr,
+ hbool_t write_permitted);
+
+static herr_t H5C2__autoadjust__ageout__insert_new_marker(H5C2_t * cache_ptr);
+
+static herr_t H5C2__autoadjust__ageout__remove_all_markers(H5C2_t * cache_ptr);
+
+static herr_t H5C2__autoadjust__ageout__remove_excess_markers(H5C2_t * cache_ptr);
+
+static herr_t H5C2_flush_single_entry(H5F_t * f,
+ hid_t dxpl_id,
+ H5C2_t * cache_ptr,
+ const H5C2_class_t * type_ptr,
+ haddr_t addr,
+ unsigned flags,
+ hbool_t del_entry_from_slist_on_destroy);
+
+static herr_t H5C2_flush_invalidate_cache(hid_t dxpl_id,
+ H5C2_t * cache_ptr,
+ unsigned flags);
+
+static void * H5C2_load_entry(H5F_t * f,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ size_t len,
+ hbool_t chk_len,
+ const void * udata_ptr);
+
+static herr_t H5C2_make_space_in_cache(hid_t dxpl_id,
+ H5C2_t * cache_ptr,
+ size_t space_needed,
+ hbool_t write_permitted);
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+static herr_t H5C2_validate_lru_list(H5C2_t * cache_ptr);
+static herr_t H5C2_verify_not_in_index(H5C2_t * cache_ptr,
+ H5C2_cache_entry_t * entry_ptr);
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+
+/****************************************************************************
+ *
+ * #defines and declarations for epoch marker cache entries.
+ *
+ * As a strategy for automatic cache size reduction, the cache may insert
+ * marker entries in the LRU list at the end of each epoch. These markers
+ * are then used to identify entries that have not been accessed for n
+ * epochs so that they can be evicted from the cache.
+ *
+ ****************************************************************************/
+
+/* Note that H5C2__MAX_EPOCH_MARKERS is defined in H5C2pkg.h, not here because
+ * it is needed to dimension arrays in H5C2_t.
+ */
+
+#define H5C2__EPOCH_MARKER_TYPE H5C2__MAX_NUM_TYPE_IDS
+
+static void * H5C2_epoch_marker_deserialize(haddr_t addr,
+ size_t len,
+ const void * image_ptr,
+ const void * udata_ptr,
+ hbool_t * dirty_ptr);
+static herr_t H5C2_epoch_marker_image_len(void * thing,
+ size_t *image_len_ptr);
+static herr_t H5C2_epoch_marker_serialize(haddr_t addr,
+ size_t len,
+ void * image_ptr,
+ void * thing,
+ unsigned * flags_ptr,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+static herr_t H5C2_epoch_marker_free_icr(haddr_t addr,
+ size_t len,
+ void * thing);
+static herr_t H5C2_epoch_marker_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing);
+
+/* Class methods table for epoch marker cache entries.  Epoch markers live
+ * only in the LRU list -- they are never read from or written to disk --
+ * so every callback below is an "unreachable" stub that fails if invoked.
+ */
+const H5C2_class_t epoch_marker_class_2 =
+{
+    /* id               = */ H5C2__EPOCH_MARKER_TYPE,
+    /* name             = */ "epoch marker",
+    /* mem_type         = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
+    /* deserialize      = */ &H5C2_epoch_marker_deserialize,
+    /* image_len        = */ &H5C2_epoch_marker_image_len,
+    /* serialize        = */ &H5C2_epoch_marker_serialize,
+    /* free_icr         = */ &H5C2_epoch_marker_free_icr,
+    /* clear_dirty_bits = */ &H5C2_epoch_marker_clear_dirty_bits,
+};
+
+
+/***************************************************************************
+ * Class functions for H5C2__EPOCH_MAKER_TYPE:
+ *
+ * None of these functions should ever be called, so there is no point in
+ * documenting them separately.
+ * JRM - 11/16/04
+ *
+ ***************************************************************************/
+
+/* Deserialize stub for epoch markers.  Epoch markers are never stored on
+ * disk, so this callback must never be invoked; it exists only to satisfy
+ * the H5C2_class_t interface and always pushes an error and returns NULL.
+ */
+static void *
+H5C2_epoch_marker_deserialize(haddr_t UNUSED addr,
+                              size_t UNUSED len,
+                              const void UNUSED * image_ptr,
+                              const void UNUSED * udata_ptr,
+                              hbool_t UNUSED * dirty_ptr)
+{
+    void * ret_value = NULL;      /* Return value */
+
+    /* Bug fix: this function was erroneously entered under the name
+     * H5C2_epoch_marker_serialize, which would mis-attribute any error
+     * stack entries generated here.
+     */
+    FUNC_ENTER_NOAPI(H5C2_epoch_marker_deserialize, NULL)
+
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "called unreachable fcn.")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+/* Image-length stub for epoch markers.  Epoch markers have no on-disk
+ * image, so this callback must never be invoked; it always pushes an
+ * error and returns FAIL.
+ */
+static herr_t
+H5C2_epoch_marker_image_len(void UNUSED *thing,
+                            size_t UNUSED *image_len_ptr)
+{
+    herr_t ret_value = FAIL;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_epoch_marker_image_len, FAIL)
+
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+/* Serialize stub for epoch markers.  Epoch markers are never written to
+ * disk, so this callback must never be invoked; it always pushes an
+ * error and returns FAIL.
+ */
+static herr_t
+H5C2_epoch_marker_serialize(haddr_t UNUSED addr,
+                            size_t UNUSED len,
+                            void UNUSED * image_ptr,
+                            void UNUSED * thing,
+                            unsigned UNUSED * flags_ptr,
+                            haddr_t UNUSED * new_addr_ptr,
+                            size_t UNUSED * new_len_ptr,
+                            void UNUSED ** new_image_ptr_ptr)
+{
+    herr_t ret_value = FAIL;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_epoch_marker_serialize, FAIL)
+
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+/* Free-in-core-representation stub for epoch markers.  Epoch markers are
+ * statically allocated inside H5C2_t and are never freed through the
+ * class interface, so this callback must never be invoked.
+ */
+static herr_t
+H5C2_epoch_marker_free_icr(haddr_t UNUSED addr,
+                           size_t UNUSED len,
+                           void UNUSED * thing)
+{
+    herr_t ret_value = FAIL;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_epoch_marker_free_icr, FAIL)
+
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+/* Clear-dirty-bits stub for epoch markers.  Epoch markers are never
+ * dirty, so this callback must never be invoked; it always pushes an
+ * error and returns FAIL.
+ */
+static herr_t
+H5C2_epoch_marker_clear_dirty_bits(haddr_t UNUSED addr,
+                                   size_t UNUSED len,
+                                   void UNUSED * thing)
+{
+    herr_t ret_value = FAIL;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_epoch_marker_clear_dirty_bits, FAIL)
+
+    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_create
+ *
+ * Purpose: Allocate, initialize, and return the address of a new
+ * instance of H5C2_t.
+ *
+ * In general, the max_cache_size parameter must be positive,
+ * and the min_clean_size parameter must lie in the closed
+ * interval [0, max_cache_size].
+ *
+ * The check_write_permitted parameter must either be NULL,
+ * or point to a function of type H5C2_write_permitted_func_t.
+ * If it is NULL, the cache will use the write_permitted
+ * flag to determine whether writes are permitted.
+ *
+ * Return: Success: Pointer to the new instance.
+ *
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/20/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/5/04
+ * Added call to H5C2_reset_cache_hit_rate_stats(). Also
+ * added initialization for cache_is_full flag and for
+ * resize_ctl.
+ *
+ * JRM -- 11/12/04
+ * Added initialization for the new size_decreased field.
+ *
+ * JRM -- 11/17/04
+ * Added/updated initialization for the automatic cache
+ * size control data structures.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of
+ * the H5C2_t structure.
+ *
+ * JRM -- 7/5/05
+ * Added the new log_flush parameter and supporting code.
+ *
+ * JRM -- 9/21/05
+ * Added the new aux_ptr parameter and supporting code.
+ *
+ * JRM -- 1/20/06
+ * Added initialization of the new prefix field in H5C2_t.
+ *
+ * JRM -- 3/16/06
+ * Added initialization for the pinned entry related fields.
+ *
+ * JRM -- 5/31/06
+ * Added initialization for the trace_file_ptr field.
+ *
+ * JRM -- 8/19/06
+ * Added initialization for the flush_in_progress field.
+ *
+ * JRM -- 8/25/06
+ * Added initialization for the slist_len_increase and
+ * slist_size_increase fields. These fields are used
+ * for sanity checking in the flush process, and are not
+ * compiled in unless H5C2_DO_SANITY_CHECKS is TRUE.
+ *
+ * JRM -- 3/28/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 7/10/07
+ * Added the f parameter, along with initialization of
+ * the field of the same name in H5C2_t. Also removed the
+ * type name table, as type names are now included in
+ * instances of H5C2_class_t.
+ *
+ * JRM -- 3/28/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+H5C2_t *
+H5C2_create(const H5F_t * f,
+            size_t max_cache_size,
+            size_t min_clean_size,
+            int max_type_id,
+            const char * (* type_name_table_ptr),
+            H5C2_write_permitted_func_t check_write_permitted,
+            hbool_t write_permitted,
+            H5C2_log_flush_func_t log_flush,
+            void * aux_ptr)
+{
+    int i;
+    H5C2_t * cache_ptr = NULL;
+    H5C2_t * ret_value = NULL;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_create, NULL)
+
+    HDassert( f );
+
+    /* max_cache_size must lie in the supported range, and min_clean_size
+     * may not exceed it.
+     */
+    HDassert( max_cache_size >= H5C2__MIN_MAX_CACHE_SIZE );
+    HDassert( max_cache_size <= H5C2__MAX_MAX_CACHE_SIZE );
+    HDassert( min_clean_size <= max_cache_size );
+
+    HDassert( max_type_id >= 0 );
+    HDassert( max_type_id < H5C2__MAX_NUM_TYPE_IDS );
+
+    HDassert( ( write_permitted == TRUE ) || ( write_permitted == FALSE ) );
+
+    /* verify that every supported entry type has a non-empty name */
+    for ( i = 0; i <= max_type_id; i++ ) {
+
+        HDassert( (type_name_table_ptr)[i] );
+        HDassert( HDstrlen(( type_name_table_ptr)[i]) > 0 );
+    }
+
+    if ( NULL == (cache_ptr = H5FL_CALLOC(H5C2_t)) ) {
+
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, \
+                    "memory allocation failed")
+    }
+
+    if ( (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16))
+         == NULL ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list.")
+    }
+
+    /* If we get this far, we should succeed.  Go ahead and initialize all
+     * the fields.
+     */
+
+    cache_ptr->magic = H5C2__H5C2_T_MAGIC;
+
+    cache_ptr->f = (H5F_t *)f;
+
+    cache_ptr->flush_in_progress = FALSE;
+
+    cache_ptr->trace_file_ptr = NULL;
+
+    cache_ptr->aux_ptr = aux_ptr;
+
+    cache_ptr->max_type_id = max_type_id;
+
+    cache_ptr->type_name_table_ptr = type_name_table_ptr;
+
+    cache_ptr->max_cache_size = max_cache_size;
+    cache_ptr->min_clean_size = min_clean_size;
+
+    cache_ptr->check_write_permitted = check_write_permitted;
+    cache_ptr->write_permitted = write_permitted;
+
+    cache_ptr->log_flush = log_flush;
+
+    cache_ptr->evictions_enabled = TRUE;
+
+    cache_ptr->index_len = 0;
+    cache_ptr->index_size = (size_t)0;
+
+    cache_ptr->slist_len = 0;
+    cache_ptr->slist_size = (size_t)0;
+
+#if H5C2_DO_SANITY_CHECKS
+    cache_ptr->slist_len_increase = 0;
+    cache_ptr->slist_size_increase = 0;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+    /* empty hash table */
+    for ( i = 0; i < H5C2__HASH_TABLE_LEN; i++ )
+    {
+        (cache_ptr->index)[i] = NULL;
+    }
+
+    /* empty protected, pinned, LRU, clean LRU, and dirty LRU lists */
+    cache_ptr->pl_len = 0;
+    cache_ptr->pl_size = (size_t)0;
+    cache_ptr->pl_head_ptr = NULL;
+    cache_ptr->pl_tail_ptr = NULL;
+
+    cache_ptr->pel_len = 0;
+    cache_ptr->pel_size = (size_t)0;
+    cache_ptr->pel_head_ptr = NULL;
+    cache_ptr->pel_tail_ptr = NULL;
+
+    cache_ptr->LRU_list_len = 0;
+    cache_ptr->LRU_list_size = (size_t)0;
+    cache_ptr->LRU_head_ptr = NULL;
+    cache_ptr->LRU_tail_ptr = NULL;
+
+    cache_ptr->cLRU_list_len = 0;
+    cache_ptr->cLRU_list_size = (size_t)0;
+    cache_ptr->cLRU_head_ptr = NULL;
+    cache_ptr->cLRU_tail_ptr = NULL;
+
+    cache_ptr->dLRU_list_len = 0;
+    cache_ptr->dLRU_list_size = (size_t)0;
+    cache_ptr->dLRU_head_ptr = NULL;
+    cache_ptr->dLRU_tail_ptr = NULL;
+
+    /* automatic cache resize is off until explicitly configured */
+    cache_ptr->size_increase_possible = FALSE;
+    cache_ptr->size_decrease_possible = FALSE;
+    cache_ptr->resize_enabled = FALSE;
+    cache_ptr->cache_full = FALSE;
+    cache_ptr->size_decreased = FALSE;
+
+    (cache_ptr->resize_ctl).version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+    (cache_ptr->resize_ctl).rpt_fcn = NULL;
+    (cache_ptr->resize_ctl).set_initial_size = FALSE;
+    (cache_ptr->resize_ctl).initial_size = H5C2__DEF_AR_INIT_SIZE;
+    (cache_ptr->resize_ctl).min_clean_fraction = H5C2__DEF_AR_MIN_CLEAN_FRAC;
+    (cache_ptr->resize_ctl).max_size = H5C2__DEF_AR_MAX_SIZE;
+    (cache_ptr->resize_ctl).min_size = H5C2__DEF_AR_MIN_SIZE;
+    (cache_ptr->resize_ctl).epoch_length = H5C2__DEF_AR_EPOCH_LENGTH;
+
+    (cache_ptr->resize_ctl).incr_mode = H5C2_incr__off;
+    (cache_ptr->resize_ctl).lower_hr_threshold = H5C2__DEF_AR_LOWER_THRESHHOLD;
+    (cache_ptr->resize_ctl).increment = H5C2__DEF_AR_INCREMENT;
+    (cache_ptr->resize_ctl).apply_max_increment = TRUE;
+    (cache_ptr->resize_ctl).max_increment = H5C2__DEF_AR_MAX_INCREMENT;
+
+    (cache_ptr->resize_ctl).decr_mode = H5C2_decr__off;
+    (cache_ptr->resize_ctl).upper_hr_threshold = H5C2__DEF_AR_UPPER_THRESHHOLD;
+    (cache_ptr->resize_ctl).decrement = H5C2__DEF_AR_DECREMENT;
+    (cache_ptr->resize_ctl).apply_max_decrement = TRUE;
+    (cache_ptr->resize_ctl).max_decrement = H5C2__DEF_AR_MAX_DECREMENT;
+    (cache_ptr->resize_ctl).epochs_before_eviction = H5C2__DEF_AR_EPCHS_B4_EVICT;
+    (cache_ptr->resize_ctl).apply_empty_reserve = TRUE;
+    (cache_ptr->resize_ctl).empty_reserve = H5C2__DEF_AR_EMPTY_RESERVE;
+
+    cache_ptr->epoch_markers_active = 0;
+
+    /* no need to initialize the ring buffer itself */
+    cache_ptr->epoch_marker_ringbuf_first = 1;
+    cache_ptr->epoch_marker_ringbuf_last = 0;
+    cache_ptr->epoch_marker_ringbuf_size = 0;
+
+    /* pre-construct the (inactive) epoch marker entries */
+    for ( i = 0; i < H5C2__MAX_EPOCH_MARKERS; i++ )
+    {
+        (cache_ptr->epoch_marker_active)[i] = FALSE;
+
+        ((cache_ptr->epoch_markers)[i]).magic =
+                                        H5C2__H5C2_CACHE_ENTRY_T_MAGIC;
+        ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i;
+        ((cache_ptr->epoch_markers)[i]).size = (size_t)0;
+        ((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class_2;
+        ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE;
+        ((cache_ptr->epoch_markers)[i]).dirtied = FALSE;
+        ((cache_ptr->epoch_markers)[i]).is_protected = FALSE;
+        ((cache_ptr->epoch_markers)[i]).is_read_only = FALSE;
+        ((cache_ptr->epoch_markers)[i]).ro_ref_count = 0;
+        ((cache_ptr->epoch_markers)[i]).is_pinned = FALSE;
+        ((cache_ptr->epoch_markers)[i]).in_slist = FALSE;
+        ((cache_ptr->epoch_markers)[i]).ht_next = NULL;
+        ((cache_ptr->epoch_markers)[i]).ht_prev = NULL;
+        ((cache_ptr->epoch_markers)[i]).next = NULL;
+        ((cache_ptr->epoch_markers)[i]).prev = NULL;
+        ((cache_ptr->epoch_markers)[i]).aux_next = NULL;
+        ((cache_ptr->epoch_markers)[i]).aux_prev = NULL;
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+        ((cache_ptr->epoch_markers)[i]).accesses = 0;
+        ((cache_ptr->epoch_markers)[i]).clears = 0;
+        ((cache_ptr->epoch_markers)[i]).flushes = 0;
+        ((cache_ptr->epoch_markers)[i]).pins = 0;
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+    }
+
+    if ( H5C2_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+
+        /* this should be impossible... */
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
+                    "H5C2_reset_cache_hit_rate_stats failed.")
+    }
+
+    H5C2_stats__reset(cache_ptr);
+
+    cache_ptr->skip_file_checks = FALSE;
+    cache_ptr->skip_dxpl_id_checks = FALSE;
+    cache_ptr->prefix[0] = '\0';  /* empty string */
+
+    /* Set return value */
+    ret_value = cache_ptr;
+
+done:
+
+    /* compare against NULL, not 0 -- ret_value is a pointer */
+    if ( ret_value == NULL ) {
+
+        if ( cache_ptr != NULL ) {
+
+            /* error cleanup -- release any partially constructed cache */
+            if ( cache_ptr->slist_ptr != NULL )
+                H5SL_close(cache_ptr->slist_ptr);
+
+            cache_ptr->magic = 0;
+            H5FL_FREE(H5C2_t, cache_ptr);
+            cache_ptr = NULL;
+
+        } /* end if */
+
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_def_auto_resize_rpt_fcn
+ *
+ * Purpose: Print results of a automatic cache resize.
+ *
+ * This function should only be used where HDprintf() behaves
+ * well -- i.e. not on Windows.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/27/04
+ *
+ * Modifications:
+ *
+ * JRM -- 11/22/04
+ * Reworked function to adapt it to the addition of the
+ * ageout method of cache size reduction.
+ *
+ * JRM -- 1/19/06
+ * Updated function for display the new prefix field of
+ * H5C2_t in output.
+ *
+ *-------------------------------------------------------------------------
+ */
+/* Default reporting function for automatic cache resize events: prints a
+ * human readable description of the resize decision to stdout.  Fix: the
+ * increase_disabled2 message previously lacked the trailing newline that
+ * every other message in this function ends with.
+ */
+void
+H5C2_def_auto_resize_rpt_fcn(H5C2_t * cache_ptr,
+#ifndef NDEBUG
+                             int32_t version,
+#else /* NDEBUG */
+                             int32_t UNUSED version,
+#endif /* NDEBUG */
+                             double hit_rate,
+                             enum H5C2_resize_status status,
+                             size_t old_max_cache_size,
+                             size_t new_max_cache_size,
+                             size_t old_min_clean_size,
+                             size_t new_min_clean_size)
+{
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( version == H5C2__CURR_AUTO_RESIZE_RPT_FCN_VER );
+
+    switch ( status )
+    {
+        case in_spec2:
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- no change. (hit rate = %lf)\n",
+                      cache_ptr->prefix, hit_rate);
+            break;
+
+        case increase2:
+            HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
+            HDassert( old_max_cache_size < new_max_cache_size );
+
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+                      cache_ptr->prefix, hit_rate,
+                      (cache_ptr->resize_ctl).lower_hr_threshold);
+
+            HDfprintf(stdout,
+                    "%s	cache size increased from (%Zu/%Zu) to (%Zu/%Zu).\n",
+                    cache_ptr->prefix,
+                    old_max_cache_size,
+                    old_min_clean_size,
+                    new_max_cache_size,
+                    new_min_clean_size);
+            break;
+
+        case decrease2:
+            HDassert( old_max_cache_size > new_max_cache_size );
+
+            /* report the reason for the decrease as well as the decrease
+             * itself.
+             */
+            switch ( (cache_ptr->resize_ctl).decr_mode )
+            {
+                case H5C2_decr__threshold:
+                    HDassert( hit_rate >
+                              (cache_ptr->resize_ctl).upper_hr_threshold );
+
+                    HDfprintf(stdout,
+                              "%sAuto cache resize -- decrease by threshold.  HR = %lf > %6.5lf\n",
+                              cache_ptr->prefix, hit_rate,
+                              (cache_ptr->resize_ctl).upper_hr_threshold);
+
+                    HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n",
+                              cache_ptr->prefix,
+                              (cache_ptr->resize_ctl).upper_hr_threshold);
+                    break;
+
+                case H5C2_decr__age_out:
+                    HDfprintf(stdout,
+                              "%sAuto cache resize -- decrease by ageout.  HR = %lf\n",
+                              cache_ptr->prefix, hit_rate);
+                    break;
+
+                case H5C2_decr__age_out_with_threshold:
+                    HDassert( hit_rate >
+                              (cache_ptr->resize_ctl).upper_hr_threshold );
+
+                    HDfprintf(stdout,
+                              "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n",
+                              cache_ptr->prefix, hit_rate,
+                              (cache_ptr->resize_ctl).upper_hr_threshold);
+                    break;
+
+                default:
+                    HDfprintf(stdout,
+                              "%sAuto cache resize -- decrease by unknown mode.  HR = %lf\n",
+                              cache_ptr->prefix, hit_rate);
+            }
+
+            HDfprintf(stdout,
+                      "%s	cache size decreased from (%Zu/%Zu) to (%Zu/%Zu).\n",
+                      cache_ptr->prefix,
+                      old_max_cache_size,
+                      old_min_clean_size,
+                      new_max_cache_size,
+                      new_min_clean_size);
+            break;
+
+        case at_max_size2:
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+                      cache_ptr->prefix, hit_rate,
+                      (cache_ptr->resize_ctl).lower_hr_threshold);
+            HDfprintf(stdout,
+                      "%s	cache already at maximum size so no change.\n",
+                      cache_ptr->prefix);
+            break;
+
+        case at_min_size2:
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
+                      cache_ptr->prefix, hit_rate);
+            HDfprintf(stdout, "%s	cache already at minimum size.\n",
+                      cache_ptr->prefix);
+            break;
+
+        case increase_disabled2:
+            /* added missing trailing newline for consistency with the
+             * other messages.
+             */
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- increase disabled -- HR = %lf.\n",
+                      cache_ptr->prefix, hit_rate);
+            break;
+
+        case decrease_disabled2:
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
+                      cache_ptr->prefix, hit_rate);
+            break;
+
+        case not_full2:
+            HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
+
+            HDfprintf(stdout,
+                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+                      cache_ptr->prefix, hit_rate,
+                      (cache_ptr->resize_ctl).lower_hr_threshold);
+            HDfprintf(stdout,
+                      "%s	cache not full so no increase in size.\n",
+                      cache_ptr->prefix);
+            break;
+
+        default:
+            HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n",
+                      cache_ptr->prefix);
+            break;
+    }
+
+    return;
+
+} /* H5C2_def_auto_resize_rpt_fcn() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_dest
+ *
+ * Purpose: Flush all data to disk and destroy the cache.
+ *
+ * This function fails if any object are protected since the
+ * resulting file might not be consistent.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the destroy (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Note that *cache_ptr has been freed upon successful return.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/11/07
+ * Reworked parameter list for the revised cache API.
+ * The function lost its pointer to an instance of
+ * H5F_t (now supplied via cache_ptr), and one of its
+ * dxlp ids.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_dest(H5C2_t * cache_ptr,
+          hid_t dxpl_id)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_dest, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* Flush and invalidate everything before tearing the cache down.
+     * NOTE(review): if the flush fails we return without freeing
+     * *cache_ptr -- the caller still owns the cache in that case.
+     */
+    if ( H5C2_flush_cache(cache_ptr, dxpl_id,
+                          H5C2__FLUSH_INVALIDATE_FLAG) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+    }
+
+    /* discard the skip list -- it should be empty after the invalidate */
+    if ( cache_ptr->slist_ptr != NULL ) {
+
+        H5SL_close(cache_ptr->slist_ptr);
+        cache_ptr->slist_ptr = NULL;
+    }
+
+    /* clobber the magic number so stale pointers to the cache are
+     * detectable, then release the structure itself.
+     */
+    cache_ptr->magic = 0;
+
+    H5FL_FREE(H5C2_t, cache_ptr);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_dest() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_dest_empty
+ *
+ * Purpose: Destroy an empty cache.
+ *
+ * This function fails if the cache is not empty on entry.
+ *
+ * Note that *cache_ptr has been freed upon successful return.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_dest_empty(H5C2_t * cache_ptr)
+{
+    herr_t ret_value=SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_dest_empty, FAIL)
+
+    /* This would normally be an assert, but we need to use an HGOTO_ERROR
+     * call to shut up the compiler.
+     */
+    if ( ( ! cache_ptr ) ||
+         ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+         ( cache_ptr->index_len != 0 ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "Bad cache_ptr or non-empty cache on entry.")
+    }
+
+
+    /* the cache is empty, so no flush is needed -- just discard the
+     * skip list.
+     */
+    if ( cache_ptr->slist_ptr != NULL ) {
+
+        H5SL_close(cache_ptr->slist_ptr);
+        cache_ptr->slist_ptr = NULL;
+    }
+
+    /* clobber the magic number so stale pointers to the cache are
+     * detectable, then release the structure itself.
+     */
+    cache_ptr->magic = 0;
+
+    H5FL_FREE(H5C2_t, cache_ptr);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_dest_empty() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_expunge_entry
+ *
+ * Purpose: Use this function to tell the cache to expunge an entry
+ * from the cache without writing it to disk even if it is
+ * dirty. The entry may not be either pinned or protected.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/29/06
+ *
+ * Modifications:
+ *
+ * JRM -- 7/11/07
+ * Reworked the parameter list for the revised cache API.
+ * The function lost its file pointer (now passed in the
+ * *cache_ptr), and one of the dxpl ids.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_expunge_entry(H5C2_t * cache_ptr,
+                   hid_t dxpl_id,
+                   const H5C2_class_t * type,
+                   haddr_t addr)
+{
+    /* const char * fcn_name = "H5C2_expunge_entry()"; */
+    herr_t		result;
+    herr_t		ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t *	entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5C2_expunge_entry, FAIL)
+
+    HDassert( H5F_addr_defined(addr) );
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( type );
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    H5C2__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+    if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) {
+
+        /* the target doesn't exist in the cache, so we are done. */
+        HGOTO_DONE(SUCCEED)
+    }
+
+    HDassert( entry_ptr->addr == addr );
+    HDassert( entry_ptr->type == type );
+
+    /* protected and pinned entries may not be expunged */
+    if ( entry_ptr->is_protected ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
+                    "Target entry is protected.")
+    }
+
+    if ( entry_ptr->is_pinned ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
+                    "Target entry is pinned.")
+    }
+
+    /* If we get this far, call H5C2_flush_single_entry() with the
+     * H5C2__FLUSH_INVALIDATE_FLAG and the H5C2__FLUSH_CLEAR_ONLY_FLAG.
+     * This will clear the entry, and then delete it from the cache.
+     */
+
+    result = H5C2_flush_single_entry(cache_ptr->f,
+                                     dxpl_id,
+                                     cache_ptr,
+                                     entry_ptr->type,
+                                     entry_ptr->addr,
+                                     H5C2__FLUSH_INVALIDATE_FLAG |
+                                     H5C2__FLUSH_CLEAR_ONLY_FLAG,
+                                     TRUE);
+
+    if ( result < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
+                    "H5C2_flush_single_entry() failed.")
+    }
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        /* Bug fix: this check runs after the done: label, so HGOTO_ERROR
+         * (which jumps to done:) would re-execute the failed check in a
+         * potentially infinite loop.  Use HDONE_ERROR, which records the
+         * error and failure return without the goto, as is the convention
+         * for error reporting in cleanup sections.
+         */
+        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_expunge_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_flush_cache
+ *
+ * Purpose: Flush (and possibly destroy) the entries contained in the
+ * specified cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be flushed. However
+ * all unprotected entries should be flushed before the
+ * function returns failure.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the flush (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/20/04
+ * Modified the function for the addition of the hash table.
+ *
+ * JRM -- 11/22/04
+ * Added code to remove all epoch markers (if any) from the
+ * LRU list before a destroy. Strictly speaking, this isn't
+ * necessary, as the marker entries reside only in the LRU
+ * list, never in the index or in the tree. However, it
+ * never hurts to tidy up.
+ *
+ * JRM -- 1/6/05
+ * Reworked code to support the new
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG, and for the replacement of
+ * H5F_FLUSH_INVALIDATE flag with H5C2__FLUSH_INVALIDATE_FLAG.
+ *
+ * Note that the H5C2__FLUSH_INVALIDATE_FLAG takes precedence
+ * over the H5C2__FLUSH_MARKED_ENTRIES_FLAG. Thus if both are
+ * set, the functions behaves as if just the
+ * H5C2__FLUSH_INVALIDATE_FLAG was set.
+ *
+ * The H5C2__FLUSH_CLEAR_ONLY_FLAG flag can co-exist with
+ * either the H5C2__FLUSH_MARKED_ENTRIES_FLAG, or the
+ * H5C2__FLUSH_INVALIDATE_FLAG. In all cases, it is simply
+ * passed along to H5C2_flush_single_entry(). In the case of
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG, it will only apply to
+ * the marked entries.
+ *
+ * JRM -- 10/15/05
+ * Added code supporting the new
+ * H5C2__FLUSH_IGNORE_PROTECTED_FLAG. We need this flag, as
+ * we now use this function to flush large number of entries
+ * in increasing address order. We do this by marking the
+ * entries to be flushed, calling this function to flush them,
+ * and then restoring LRU order.
+ *
+ * However, it is possible that the cache will contain other,
+ * unmarked protected entries, when we make this call. This
+ * new flag allows us to ignore them.
+ *
+ * Note that even with this flag set, it is still an error
+ * to try to flush a protected entry.
+ *
+ * JRM -- 3/25/06
+ * Updated function to handle pinned entries.
+ *
+ * JRM -- 8/19/06
+ * Added code managing the new flush_in_progress field of
+ * H5C2_t.
+ *
+ * Also reworked function to allow for the possibility that
+ * entries will be dirtied, resized, or renamed during flush
+ * callbacks. As a result, we may have to make multiple
+ * passes through the skip list before the cache is flushed.
+ *
+ * JRM -- 7/11/07
+ * Reworked function to support the new metadata cache API.
+ * The function lost the H5F_t parameter (now passed via
+ * *cache_ptr), and one of the dxpl id parameters.
+ *
+ * JRM -- 10/13/07
+ * Added code to detect and manage the case in which a
+ * serialize callback changes the s-list out from under
+ * the function. The only way I can think of in which this
+ * can happen is if a serialize function loads an entry
+ * into the cache that isn't there already. Quincey tells
+ * me that this will never happen, but I'm not sure I
+ * believe him.
+ *
+ * Note that this is a pretty bad scenario if it ever
+ * happens. The code I have added should allow us to
+ * handle the situation, but one can argue that I should
+ * just scream and die if I ever detect the condition.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_flush_cache(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ unsigned flags)
+{
+ /* const char * fcn_name = "H5C2_flush_cache()"; */
+ herr_t status;
+ herr_t ret_value = SUCCEED;
+ hbool_t destroy;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ int32_t passes = 0;
+ int32_t protected_entries = 0;
+ H5SL_node_t * node_ptr = NULL;
+ H5C2_cache_entry_t * entry_ptr = NULL;
+ H5C2_cache_entry_t * next_entry_ptr = NULL;
+#if H5C2_DO_SANITY_CHECKS
+ int64_t flushed_entries_count;
+ size_t flushed_entries_size;
+ int64_t initial_slist_len;
+ size_t initial_slist_size;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ FUNC_ENTER_NOAPI(H5C2_flush_cache, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+ HDassert( cache_ptr->slist_ptr );
+
+ /* decode the flags of interest */
+ ignore_protected = ( (flags & H5C2__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
+
+ destroy = ( (flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 );
+
+ /* note that flush_marked_entries is set to FALSE if destroy is TRUE */
+ flush_marked_entries = ( ( (flags & H5C2__FLUSH_MARKED_ENTRIES_FLAG) != 0 )
+ &&
+ ( ! destroy )
+ );
+
+ HDassert( ! ( destroy && ignore_protected ) );
+
+ HDassert( ! ( cache_ptr->flush_in_progress ) );
+
+ /* flush_in_progress is reset on the done: path below, so it is
+ * cleared on both success and failure exits from this function.
+ */
+ cache_ptr->flush_in_progress = TRUE;
+
+ if ( destroy ) {
+
+ status = H5C2_flush_invalidate_cache(dxpl_id,
+ cache_ptr,
+ flags);
+
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "flush invalidate failed.")
+ }
+ } else {
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+
+ flushed_entries_last_pass = TRUE;
+
+ /* outer loop: one pass over the skip list per iteration.  Multiple
+ * passes may be needed because serialize callbacks can dirty,
+ * resize, or rename entries mid-flush (see comments below).
+ */
+ while ( ( passes < H5C2__MAX_PASSES_ON_FLUSH ) &&
+ ( cache_ptr->slist_len != 0 ) &&
+ ( protected_entries == 0 ) &&
+ ( flushed_entries_last_pass ) )
+ {
+ flushed_entries_last_pass = FALSE;
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if ( node_ptr != NULL ) {
+ next_entry_ptr = (H5C2_cache_entry_t *)H5SL_item(node_ptr);
+
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 1 ?!?!");
+ }
+ HDassert( next_entry_ptr->magic ==
+ H5C2__H5C2_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
+ } else {
+ next_entry_ptr = NULL;
+ }
+
+ /* slist_len != 0 in the loop condition, so the list must have
+ * a first node.
+ */
+ HDassert( node_ptr != NULL );
+
+#if H5C2_DO_SANITY_CHECKS
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or renamed.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, and/or flushed during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C2_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C2_t.
+ */
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ /* inner loop: walk the skip list in increasing address order */
+ while ( node_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, it is possible
+ * that the serialize callback will dirty and/or resize
+ * other entries in the cache. In particular, while
+ * Quincey has promised me that this will never happen,
+ * it is possible that the serialize callback for an
+ * entry may protect an entry that is not in the cache,
+ * perhaps causing the cache to flush and possibly
+ * evict the entry associated with node_ptr to make
+ * space for the new entry.
+ *
+ * Thus we do a bit of extra sanity checking on entry_ptr,
+ * and break out of this scan of the skip list if we
+ * detect minor problems. We have a bit of leeway on the
+ * number of passes through the skip list, so this shouldn't
+ * be an issue in the flush in and of itself, as it should
+ * be all but impossible for this to happen more than once
+ * in any flush.
+ *
+ * Observe that breaking out of the scan early
+ * shouldn't break the sanity checks just after the end
+ * of this while loop.
+ *
+ * If an entry has merely been marked clean and removed from
+ * the s-list, we simply break out of the scan.
+ *
+ * If the entry has been evicted, we flag an error and
+ * exit.
+ */
+
+ if ( entry_ptr->magic != H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry_ptr->magic invalid ?!?!");
+
+ } else if ( ( ! entry_ptr->is_dirty ) ||
+ ( ! entry_ptr->in_slist ) ) {
+
+ /* the s-list has been modified out from under us.
+ * set node_ptr to NULL and break out of the loop.
+ */
+ node_ptr = NULL;
+ break;
+ }
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if ( node_ptr != NULL ) {
+ next_entry_ptr = (H5C2_cache_entry_t *)H5SL_item(node_ptr);
+
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 2 ?!?!");
+ }
+ HDassert( next_entry_ptr->magic ==
+ H5C2__H5C2_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
+ } else {
+ next_entry_ptr = NULL;
+ }
+
+ HDassert( entry_ptr != NULL );
+ HDassert( entry_ptr->in_slist );
+
+ if ( ( ! flush_marked_entries ) ||
+ ( entry_ptr->flush_marker ) ) {
+
+ if ( entry_ptr->is_protected ) {
+
+ /* we probably have major problems -- but lets flush
+ * everything we can before we decide whether to flag
+ * an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+
+ } else if ( entry_ptr->is_pinned ) {
+
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if ( TRUE ) { /* When we get to multithreaded cache,
+ * we will need either locking code,
+ * and/or a test to see if the entry
+ * is in flushable condition here.
+ */
+#if H5C2_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += entry_ptr->size;
+#endif /* H5C2_DO_SANITY_CHECKS */
+ status = H5C2_flush_single_entry(cache_ptr->f,
+ dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ flags,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we
+ * are toast so just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty pinned entry flush failed.")
+ }
+ }
+ } else {
+ /* unpinned, unprotected dirty entry -- flush it */
+#if H5C2_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += entry_ptr->size;
+#endif /* H5C2_DO_SANITY_CHECKS */
+ status = H5C2_flush_single_entry(cache_ptr->f,
+ dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ flags,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are
+ * toast so just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't flush entry.")
+ }
+ flushed_entries_last_pass = TRUE;
+ }
+ }
+ } /* while ( node_ptr != NULL ) */
+
+#if H5C2_DO_SANITY_CHECKS
+ /* Verify that the slist size and length are as expected. */
+
+ HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
+ flushed_entries_count) == cache_ptr->slist_len );
+ HDassert( (initial_slist_size + cache_ptr->slist_size_increase -
+ flushed_entries_size) == cache_ptr->slist_size );
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ passes++;
+
+ } /* while */
+
+ HDassert( protected_entries <= cache_ptr->pl_len );
+
+ if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
+ ||
+ ( tried_to_flush_protected_entry ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "cache has protected items")
+ }
+
+ if ( ( cache_ptr->slist_len != 0 ) &&
+ ( passes >= H5C2__MAX_PASSES_ON_FLUSH ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "flush pass limit exceeded.")
+ }
+
+#if H5C2_DO_SANITY_CHECKS
+ if ( ! flush_marked_entries ) {
+
+ HDassert( cache_ptr->slist_len == 0 );
+ HDassert( cache_ptr->slist_size == 0 );
+ }
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ }
+
+done:
+
+ /* reset flush_in_progress on both success and error paths */
+ cache_ptr->flush_in_progress = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_flush_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_flush_to_min_clean
+ *
+ * Purpose: Flush dirty entries until the caches min clean size is
+ * attained.
+ *
+ * This function is used in the implementation of the
+ * metadata cache in PHDF5. To avoid "messages from the
+ * future", the cache on process 0 can't be allowed to
+ * flush entries until the other processes have reached
+ * the same point in the calculation. If this constraint
+ * is not met, it is possible that the other processes will
+ * read metadata generated at a future point in the
+ * computation.
+ *
+ *
+ * Return: Non-negative on success/Negative on failure or if
+ * write is not permitted.
+ *
+ * Programmer: John Mainzer
+ * 9/16/05
+ *
+ * Modifications:
+ *
+ * Re-wrote function to flush dirty entries in increasing
+ * address order, while maintaining LRU order in the LRU list
+ * upon return.
+ *
+ * Do this by scanning up the dirty LRU list for entries to
+ * flush to reach min clean size, setting their flush_marker
+ * flags, and recording their addresses in the order
+ * encountered.
+ *
+ * Then call H5C2_flush_cache() to flush the marked entries.
+ *
+ * Finally, use the list of marked entries to force the
+ * correct LRU list order after the flush.
+ *
+ * JRM - 10/13/05
+ *
+ * This change had the opposite of the desired effect. Let's
+ * leave it in (albeit commented out for now). If we can't
+ * find a case where it helps, lets get rid of it.
+ *
+ *
+ * Added some sanity checks to the change which verify the
+ * expected values of the new is_read_only and ro_ref_count
+ * fields.
+ * JRM - 3/29/07
+ *
+ * Modified parameter list for the new metadata cache API.
+ * The function lost its H5F_t parameter (now passed via
+ * *cache_ptr), and one of its dxpl ids.
+ *
+ * JRM - 7/11/07
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_flush_to_min_clean(H5C2_t * cache_ptr,
+ hid_t dxpl_id)
+{
+ herr_t result;
+ herr_t ret_value = SUCCEED;
+ hbool_t write_permitted;
+#if 0 /* modified code -- commented out for now */
+ int i;
+ int flushed_entries_count = 0;
+ size_t flushed_entries_size = 0;
+ size_t space_needed = 0;
+ haddr_t * flushed_entries_list = NULL;
+ H5C2_cache_entry_t * entry_ptr = NULL;
+#endif /* JRM */
+
+ FUNC_ENTER_NOAPI(H5C2_flush_to_min_clean, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+
+ /* determine whether writes are permitted -- via the user supplied
+ * callback if one is registered, and via the write_permitted flag
+ * in H5C2_t otherwise.
+ */
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(cache_ptr->f,
+ dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Can't get write_permitted")
+ }
+ } else {
+
+ write_permitted = cache_ptr->write_permitted;
+ }
+
+ if ( ! write_permitted ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "cache write is not permitted!?!\n");
+ }
+#if 1 /* original code */
+ /* requesting zero bytes of space still forces eviction/flushing of
+ * dirty entries until the min clean size constraint is satisfied.
+ */
+ result = H5C2_make_space_in_cache(dxpl_id,
+ cache_ptr,
+ (size_t)0,
+ write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C2_make_space_in_cache failed.")
+ }
+#else /* modified code -- commented out for now */
+ /* compute the number of bytes of dirty entries that must be flushed
+ * to bring the clean portion of the cache up to min_clean_size.
+ */
+ if ( cache_ptr->max_cache_size > cache_ptr->index_size ) {
+
+ if ( ((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
+
+ space_needed = 0;
+
+ } else {
+
+ space_needed = cache_ptr->min_clean_size -
+ ((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size);
+ }
+ } else {
+
+ if ( cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size ) {
+
+ space_needed = 0;
+
+ } else {
+
+ space_needed = cache_ptr->min_clean_size -
+ cache_ptr->cLRU_list_size;
+ }
+ }
+
+ if ( space_needed > 0 ) { /* we have work to do */
+
+ HDassert( cache_ptr->slist_len > 0 );
+
+ /* allocate an array to keep a list of the entries that we
+ * mark for flush. We need this list to touch up the LRU
+ * list after the flush.
+ */
+ flushed_entries_list = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
+ (size_t)(cache_ptr->slist_len));
+
+ if ( flushed_entries_list == NULL ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "memory allocation failed for flushed entries list")
+ }
+
+ /* Scan the dirty LRU list from tail forward and mark sufficient
+ * entries to free up the necessary space. Keep a list of the
+ * entries marked in the order in which they are encountered.
+ */
+ entry_ptr = cache_ptr->dLRU_tail_ptr;
+
+ while ( ( flushed_entries_size < space_needed ) &&
+ ( flushed_entries_count < cache_ptr->slist_len ) &&
+ ( entry_ptr != NULL ) )
+ {
+ HDassert( ! (entry_ptr->is_protected) );
+ HDassert( ! (entry_ptr->is_read_only) );
+ HDassert( entry_ptr->ro_ref_count == 0 );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
+
+ entry_ptr->flush_marker = TRUE;
+ flushed_entries_size += entry_ptr->size;
+ flushed_entries_list[flushed_entries_count] = entry_ptr->addr;
+ flushed_entries_count++;
+ entry_ptr = entry_ptr->aux_prev;
+ }
+
+ /* diagnostic -- note that the comparisons here must match the
+ * asserts just below.
+ */
+ if ( ( flushed_entries_count > cache_ptr->slist_len) ||
+ ( flushed_entries_size < space_needed ) ) {
+ HDfprintf(stdout, "flushed_entries_count = %d <= %d = slist_len\n",
+ (int)flushed_entries_count, (int)(cache_ptr->slist_len));
+ HDfprintf(stdout,
+ "flushed_entries_size = %d < %d = space_needed.\n",
+ (int)flushed_entries_size, (int)space_needed);
+ }
+
+ HDassert( flushed_entries_count <= cache_ptr->slist_len );
+ HDassert( flushed_entries_size >= space_needed );
+
+
+ /* Flush the marked entries.  Note: this call has been updated
+ * to the revised metadata cache API -- the old five argument
+ * call referenced f / primary_dxpl_id / secondary_dxpl_id,
+ * which no longer exist in this function, and thus could not
+ * compile if this block were ever re-enabled.
+ */
+ result = H5C2_flush_cache(cache_ptr, dxpl_id,
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_IGNORE_PROTECTED_FLAG);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C2_flush_cache failed.")
+ }
+
+ /* Now touch up the LRU list so as to place the flushed entries in
+ * the order they would be in if we had flushed them in the
+ * order we encountered them in.
+ */
+
+ i = 0;
+ while ( i < flushed_entries_count )
+ {
+ H5C2__SEARCH_INDEX_NO_STATS(cache_ptr, flushed_entries_list[i], \
+ entry_ptr, FAIL)
+
+ /* At present, the above search must always succeed. However,
+ * that may change. Write the code so we need only remove the
+ * following assert in that event.
+ */
+ HDassert( entry_ptr != NULL );
+ H5C2__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, FAIL)
+ i++;
+ }
+ } /* if ( space_needed > 0 ) */
+#endif /* end modified code -- commented out for now */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_flush_to_min_clean() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_get_cache_auto_resize_config
+ *
+ * Purpose: Copy the current configuration of the cache automatic
+ * re-sizing function into the instance of H5C2_auto_size_ctl_t
+ * pointed to by config_ptr.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 10/8/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_cache_auto_resize_config(H5C2_t * cache_ptr,
+                                  H5C2_auto_size_ctl_t *config_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_get_cache_auto_resize_config, FAIL)
+
+    /* validate parameters with HGOTO_ERROR rather than HDassert, so
+     * the checks remain active in production builds.
+     */
+    if ( ( NULL == cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+    }
+
+    if ( NULL == config_ptr ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad config_ptr on entry.")
+    }
+
+    /* hand back a copy of the resize control structure, then overwrite
+     * the initial size fields to reflect the cache's current maximum
+     * size rather than whatever was configured originally.
+     */
+    *config_ptr = cache_ptr->resize_ctl;
+
+    config_ptr->set_initial_size = FALSE;
+    config_ptr->initial_size = cache_ptr->max_cache_size;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_cache_auto_resize_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_get_cache_size
+ *
+ * Purpose: Return the cache maximum size, the minimum clean size, the
+ * current size, and the current number of entries in
+ * *max_size_ptr, *min_clean_size_ptr, *cur_size_ptr, and
+ * *cur_num_entries_ptr respectively. If any of these
+ * parameters are NULL, skip that value.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 10/8/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_cache_size(H5C2_t * cache_ptr,
+                    size_t * max_size_ptr,
+                    size_t * min_clean_size_ptr,
+                    size_t * cur_size_ptr,
+                    int32_t * cur_num_entries_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_get_cache_size, FAIL)
+
+    if ( ( NULL == cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+    }
+
+    /* each output parameter is optional -- skip any that are NULL */
+    if ( max_size_ptr != NULL ) {
+        *max_size_ptr = cache_ptr->max_cache_size;
+    }
+
+    if ( min_clean_size_ptr != NULL ) {
+        *min_clean_size_ptr = cache_ptr->min_clean_size;
+    }
+
+    if ( cur_size_ptr != NULL ) {
+        *cur_size_ptr = cache_ptr->index_size;
+    }
+
+    if ( cur_num_entries_ptr != NULL ) {
+        *cur_num_entries_ptr = cache_ptr->index_len;
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_cache_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_get_cache_hit_rate
+ *
+ * Purpose: Compute and return the current cache hit rate in
+ * *hit_rate_ptr. If there have been no accesses since the
+ * last time the cache hit rate stats were reset, set
+ * *hit_rate_ptr to 0.0. On error, *hit_rate_ptr is
+ * undefined.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 10/7/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_cache_hit_rate(H5C2_t * cache_ptr,
+                        double * hit_rate_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_get_cache_hit_rate, FAIL)
+
+    if ( ( NULL == cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+    }
+
+    if ( NULL == hit_rate_ptr ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad hit_rate_ptr on entry.")
+    }
+
+    HDassert( cache_ptr->cache_hits >= 0 );
+    HDassert( cache_ptr->cache_accesses >= cache_ptr->cache_hits );
+
+    /* report 0.0 when there have been no accesses since the last reset
+     * of the hit rate stats -- this also avoids a divide by zero.
+     */
+    if ( cache_ptr->cache_accesses <= 0 ) {
+
+        *hit_rate_ptr = 0.0;
+
+    } else {
+
+        *hit_rate_ptr = ((double)(cache_ptr->cache_hits)) /
+                        ((double)(cache_ptr->cache_accesses));
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_cache_hit_rate() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_get_entry_status
+ *
+ * Purpose: This function is used to determine whether the cache
+ * contains an entry with the specified base address. If
+ * the entry exists, it also reports some status information
+ * on the entry.
+ *
+ * Status information is reported in the locations pointed
+ * to by the size_ptr, in_cache_ptr, is_dirty_ptr, and
+ * is_protected_ptr. While in_cache_ptr must be defined,
+ * the remaining pointers may be NULL, in which case the
+ * associated data is not reported.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/1/05
+ *
+ * Modifications:
+ *
+ * JRM -- 4/26/06
+ * Added the is_pinned_ptr parameter and supporting code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_entry_status(H5C2_t * cache_ptr,
+                      haddr_t addr,
+                      size_t * size_ptr,
+                      hbool_t * in_cache_ptr,
+                      hbool_t * is_dirty_ptr,
+                      hbool_t * is_protected_ptr,
+                      hbool_t * is_pinned_ptr)
+{
+    /* const char * fcn_name = "H5C2_get_entry_status()"; */
+    herr_t ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t * entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5C2_get_entry_status, FAIL)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( H5F_addr_defined(addr) );
+    HDassert( in_cache_ptr != NULL );
+
+    /* this check duplicates two of the asserts above, but we need an
+     * invocation of HGOTO_ERROR to keep the compiler happy, and the
+     * check stays active in production builds.
+     */
+    if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+    }
+
+    H5C2__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+    if ( NULL == entry_ptr ) {
+
+        /* no entry at this address -- just report that and return */
+        *in_cache_ptr = FALSE;
+
+    } else {
+
+        *in_cache_ptr = TRUE;
+
+        /* the remaining status outputs are all optional */
+        if ( size_ptr != NULL ) {
+            *size_ptr = entry_ptr->size;
+        }
+
+        if ( is_dirty_ptr != NULL ) {
+            *is_dirty_ptr = entry_ptr->is_dirty;
+        }
+
+        if ( is_protected_ptr != NULL ) {
+            *is_protected_ptr = entry_ptr->is_protected;
+        }
+
+        if ( is_pinned_ptr != NULL ) {
+            *is_pinned_ptr = entry_ptr->is_pinned;
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_entry_status() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_get_evictions_enabled()
+ *
+ * Purpose: Copy the current value of cache_ptr->evictions_enabled into
+ * *evictions_enabled_ptr.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 7/27/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_evictions_enabled(H5C2_t * cache_ptr,
+                           hbool_t * evictions_enabled_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_get_evictions_enabled, FAIL)
+
+    /* validate both parameters before touching anything */
+    if ( ( NULL == cache_ptr ) ||
+         ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+    }
+
+    if ( NULL == evictions_enabled_ptr ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "Bad evictions_enabled_ptr on entry.")
+    }
+
+    /* report the current value of the evictions_enabled flag */
+    *evictions_enabled_ptr = cache_ptr->evictions_enabled;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_evictions_enabled() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_get_trace_file_ptr
+ *
+ * Purpose: Get the trace_file_ptr field from the cache.
+ *
+ * This field will either be NULL (which indicates that trace
+ * file logging is turned off), or contain a pointer to the
+ * open file to which trace file data is to be written.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 1/20/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_get_trace_file_ptr(H5C2_t * cache_ptr,
+                        FILE ** trace_file_ptr_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_get_trace_file_ptr, FAIL)
+
+    /* these would normally be asserts, but we need HGOTO_ERROR calls
+     * to shut up the compiler.
+     */
+    if ( ( NULL == cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+    }
+
+    if ( NULL == trace_file_ptr_ptr ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL trace_file_ptr_ptr")
+    }
+
+    /* NULL means trace file logging is turned off; otherwise this is
+     * the open file to which trace file data is written.
+     */
+    *trace_file_ptr_ptr = cache_ptr->trace_file_ptr;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_get_trace_file_ptr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_insert_entry
+ *
+ * Purpose: Adds the specified thing to the cache. The thing need not
+ * exist on disk yet, but it must have an address and disk
+ * space reserved.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the insertion (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the
+ * metadata cache, but may not be needed elsewhere. If so,
+ * just use the same dxpl_id for both parameters.
+ *
+ * The primary_dxpl_id is the dxpl_id passed to the
+ * check_write_permitted function if such a function has been
+ * provided.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM -- 10/28/04
+ * Added code to set the cache_full flag to TRUE when ever
+ * we need to make space in the cache.
+ *
+ * JRM -- 11/22/04
+ * Updated function for the addition of the first_flush_ptr
+ * parameter to H5C2_make_space_in_cache().
+ *
+ * JRM -- 1/6/05
+ * Added the flags parameter, and code supporting
+ * H5C2__SET_FLUSH_MARKER_FLAG. Note that this flag is
+ * ignored unless the new entry is dirty.
+ *
+ * JRM -- 6/6/05
+ * Added code to force all inserted entries to be dirty.
+ * This is part of a set of changes moving management of the
+ * is_dirty field of H5C2_cache_entry_t into the H5C2 code.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of
+ * the H5C2_t structure.
+ *
+ * JRM -- 3/16/06
+ * Added initialization for the new is_pinned field of the
+ * H5C2_cache_entry_t structure.
+ *
+ * JRM -- 5/3/06
+ * Added initialization for the new dirtied field of the
+ * H5C2_cache_entry_t structure.
+ *
+ * JRM -- 8/9/06
+ * Added code supporting insertion of pinned entries.
+ *
+ * JRM -- 8/21/06
+ * Added initialization for the new flush_in_progress and
+ * destroy_in_progress fields.
+ *
+ * JRM -- 3/29/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 7/11/07
+ * Reworked the parameter list for the revised metadata
+ * cache API. The function lost its pointer to H5F_t
+ * (now supplied via *cache_ptr), and one of dxpl id
+ * parameters. It gained a entry length parameter.
+ * Numerous internal changes to support the API change.
+ *
+ * JRM -- 8/1/07
+ * Added code to disable evictions when the new
+ * evictions_enabled field is FALSE.
+ *
+ * JRM -- 10/12/07
+ * Added initialization for the new magic field.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_insert_entry(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ size_t len,
+ void * thing,
+ unsigned int flags)
+{
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t insert_pinned;
+ hbool_t set_flush_marker;
+ hbool_t write_permitted = TRUE;
+ H5C2_cache_entry_t * entry_ptr;
+ H5C2_cache_entry_t * test_entry_ptr;
+
+ FUNC_ENTER_NOAPI(H5C2_insert_entry, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+ HDassert( type );
+ HDassert( H5F_addr_defined(addr) );
+ HDassert( thing );
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_verify_not_in_index(cache_ptr, (H5C2_cache_entry_t *)thing) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "thing already in index.\n");
+ }
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "LRU sanity check failed.\n");
+ }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+ set_flush_marker = ( (flags & H5C2__SET_FLUSH_MARKER_FLAG) != 0 );
+ insert_pinned = ( (flags & H5C2__PIN_ENTRY_FLAG) != 0 );
+
+ entry_ptr = (H5C2_cache_entry_t *)thing;
+
+ entry_ptr->magic = H5C2__H5C2_CACHE_ENTRY_T_MAGIC;
+
+ entry_ptr->addr = addr;
+ entry_ptr->type = type;
+
+ /* newly inserted entries are assumed to be dirty */
+ entry_ptr->is_dirty = TRUE;
+
+ /* not protected, so can't be dirtied */
+ entry_ptr->dirtied = FALSE;
+
+ entry_ptr->size = len;
+
+ HDassert( entry_ptr->size < H5C2_MAX_ENTRY_SIZE );
+
+ entry_ptr->in_slist = FALSE;
+
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->clear_on_unprotect = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+
+ entry_ptr->flush_in_progress = FALSE;
+ entry_ptr->destroy_in_progress = FALSE;
+
+ entry_ptr->ht_next = NULL;
+ entry_ptr->ht_prev = NULL;
+
+ entry_ptr->next = NULL;
+ entry_ptr->prev = NULL;
+
+ entry_ptr->aux_next = NULL;
+ entry_ptr->aux_prev = NULL;
+
+ H5C2__RESET_CACHE_ENTRY_STATS(entry_ptr)
+
+ if ( ( cache_ptr->evictions_enabled ) &&
+ ( (cache_ptr->index_size + entry_ptr->size) >
+ cache_ptr->max_cache_size ) ) {
+
+ size_t space_needed;
+
+ cache_ptr->cache_full = TRUE;
+
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(cache_ptr->f,
+ dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "Can't get write_permitted")
+ }
+ } else {
+
+ write_permitted = cache_ptr->write_permitted;
+ }
+
+ HDassert( entry_ptr->size <= H5C2_MAX_ENTRY_SIZE );
+
+ space_needed = entry_ptr->size;
+
+ if ( space_needed > cache_ptr->max_cache_size ) {
+
+ space_needed = cache_ptr->max_cache_size;
+ }
+
+ /* Note that space_needed is just the amount of space that
+ * needed to insert the new entry without exceeding the cache
+ * size limit. The subsequent call to H5C2_make_space_in_cache()
+ * may evict the entries required to free more or less space
+ * depending on conditions. It MAY be less if the cache is
+ * currently undersized, or more if the cache is oversized.
+ *
+ * The cache can exceed its maximum size limit via the following
+ * mechanisms:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Finally, we usually don't check to see if the cache is
+ * oversized at the end of an unprotect. As a result, it is
+ * possible to have a vastly oversized cache with no protected
+ * entries as long as all the protects preceed the unprotects.
+ *
+ * Since items 1 and 2 are not changing any time soon, I see
+ * no point in worrying about the third.
+ */
+
+ result = H5C2_make_space_in_cache(dxpl_id,
+ cache_ptr,
+ space_needed,
+ write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "H5C2_make_space_in_cache failed.")
+ }
+ }
+
+ /* verify that the new entry isn't already in the hash table -- scream
+ * and die if it is.
+ */
+
+ H5C2__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+ if ( test_entry_ptr != NULL ) {
+
+ if ( test_entry_ptr == entry_ptr ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "entry already in cache.")
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "duplicate entry in cache.")
+ }
+ }
+
+ /* we don't initialize the protected field until here as it is
+ * possible that the entry is already in the cache, and already
+ * protected. If it is, we don't want to make things worse by
+ * marking it unprotected.
+ */
+
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+
+ entry_ptr->is_pinned = insert_pinned;
+
+ H5C2__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+
+ /* New entries are presumed to be dirty, so this if statement is
+ * unnecessary. Rework it once the rest of the code changes are
+ * in and tested. -- JRM
+ */
+ if ( entry_ptr->is_dirty ) {
+
+ entry_ptr->flush_marker = set_flush_marker;
+ H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+ } else {
+
+ entry_ptr->flush_marker = FALSE;
+ }
+
+ H5C2__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "LRU sanity check failed.\n");
+ }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+ H5C2__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "LRU sanity check failed.\n");
+ }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_insert_entry() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_mark_entries_as_clean
+ *
+ * Purpose: When the H5C2 code is used to implement the metadata caches
+ * in PHDF5, only the cache with MPI_rank 0 is allowed to
+ * actually write entries to disk -- all other caches must
+ * retain dirty entries until they are advised that the
+ * entries are clean.
+ *
+ * This function exists to allow the H5C2 code to receive these
+ * notifications.
+ *
+ * The function receives a list of entry base addresses
+ * which must refer to dirty entries in the cache. If any
+ * of the entries are either clean or don't exist, the
+ * function flags an error.
+ *
+ * The function scans the list of entries and flushes all
+ * those that are currently unprotected with the
+ * H5C2__FLUSH_CLEAR_ONLY_FLAG. Those that are currently
+ * protected are flagged for clearing when they are
+ * unprotected.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/5/05
+ *
+ * Modifications:
+ *
+ * Reworked function to flush entries in LRU order instead
+ * of increasing address order. The hope is that this will
+ * improve the hit rate on the slave caches.
+ *
+ * JRM - 10/13/05
+ *
+ * Leave the old code in place for now (commented out) for
+ * benchmarking.
+ *
+ * JRM -- 4/13/06
+ * Updated function to deal with pinned entries.
+ *
+ * JRM -- 7/11/07
+ * Revised function for the new metadata cache API. The
+ * function lost its point to H5F_t (now supplied via
+ * *cache_ptr), and one of its dxpl ids. Also internal
+ * changes supporting the revised API.
+ *
+ * JRM -- 10/13/07
+ * Didn't modify this function to detect the case in which
+ * the LRU is modified out from under it. It shouldn't be
+ * an issue here, as this function is only called in the
+ * parallel case, and serialize callbacks must not modify
+ * other entries in parallel case. If they do, they will
+ * cause inconsistencies in metadata across processes.
+ *
+ * Further, since this function only clears entries, and
+ * thus the serialize functions are never called, the
+ * situation will be even worse, as the changes will only
+ * exist on process 0.
+ *
+ * Bottom line -- the calls to H5C2_flush_single_entry()
+ * in this function will not call serialize, thus no change
+ * in the LRU is possible. Even if they did, the serialize()
+ * callbacks are banned from making such changes in the
+ * parallel case.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+/* See the header comment above for the full contract.  In brief: mark each
+ * (dirty) entry whose base address appears in ce_array_ptr[] as clean,
+ * clearing it without writing it to disk.  Fails if any listed entry is
+ * absent from the cache or is not dirty.
+ */
+herr_t
+H5C2_mark_entries_as_clean(H5C2_t * cache_ptr,
+                           hid_t dxpl_id,
+                           int32_t ce_array_len,
+                           haddr_t * ce_array_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    int entries_cleared;
+    int entries_examined;
+    int i;
+    int initial_list_len;
+    haddr_t addr;
+#if H5C2_DO_SANITY_CHECKS
+    /* per-category mark counters, used only to cross-check the clear
+     * counts after the list scans below.
+     */
+    int pinned_entries_marked = 0;
+    int protected_entries_marked = 0;
+    int other_entries_marked = 0;
+    haddr_t last_addr;
+#endif /* H5C2_DO_SANITY_CHECKS */
+    H5C2_cache_entry_t * clear_ptr = NULL;
+    H5C2_cache_entry_t * entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI(H5C2_mark_entries_as_clean, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    HDassert( ce_array_len > 0 );
+    HDassert( ce_array_ptr != NULL );
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HDassert(0);
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    /* Pass 1: verify that each listed address refers to a dirty entry in
+     * the cache, and mark that entry to be cleared.  The actual clears
+     * happen in the LRU / pinned list scans below (or on unprotect, for
+     * entries that are currently protected).
+     */
+    for ( i = 0; i < ce_array_len; i++ )
+    {
+        addr = ce_array_ptr[i];
+
+#if H5C2_DO_SANITY_CHECKS
+        /* verify that ce_array_ptr[] is sorted in strictly increasing
+         * address order -- which also rules out duplicates.  last_addr
+         * is only declared/used under H5C2_DO_SANITY_CHECKS.
+         */
+        if ( i == 0 ) {
+
+            last_addr = addr;
+
+        } else {
+
+            if ( last_addr == addr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "Duplicate entry in cleaned list.\n");
+
+            } else if ( last_addr > addr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "cleaned list not sorted.\n");
+            }
+        }
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+        if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+            HDassert(0);
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "LRU sanity check failed.\n");
+        }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+        HDassert( H5F_addr_defined(addr) );
+
+        H5C2__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+        if ( entry_ptr == NULL ) {
+#if H5C2_DO_SANITY_CHECKS
+            HDfprintf(stdout,
+                  "H5C2_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
+                      (int)i,
+                      (long)addr);
+#endif /* H5C2_DO_SANITY_CHECKS */
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Listed entry not in cache?!?!?.")
+
+        } else if ( ! entry_ptr->is_dirty ) {
+
+#if H5C2_DO_SANITY_CHECKS
+            HDfprintf(stdout,
+                  "H5C2_mark_entries_as_clean: entry %ld is not dirty!?!\n",
+                      (long)addr);
+#endif /* H5C2_DO_SANITY_CHECKS */
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "Listed entry not dirty?!?!?.")
+#if 0 /* original code */
+        } else if ( entry_ptr->is_protected ) {
+
+            entry_ptr->clear_on_unprotect = TRUE;
+
+        } else {
+
+            if ( H5C2_flush_single_entry(cache_ptr->f,
+                                         dxpl_id,
+                                         cache_ptr,
+                                         entry_ptr->type,
+                                         addr,
+                                         H5C2__FLUSH_CLEAR_ONLY_FLAG,
+                                         TRUE) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+            }
+        }
+#else /* modified code */
+        } else {
+            /* Mark the entry to be cleared on unprotect.  We will
+             * scan the LRU list shortly, and clear all those entries
+             * not currently protected.
+             */
+            entry_ptr->clear_on_unprotect = TRUE;
+#if H5C2_DO_SANITY_CHECKS
+            if ( entry_ptr->is_protected ) {
+
+                protected_entries_marked++;
+
+            } else if ( entry_ptr->is_pinned ) {
+
+                pinned_entries_marked++;
+
+            } else {
+
+                other_entries_marked++;
+            }
+#endif /* H5C2_DO_SANITY_CHECKS */
+        }
+#endif /* end modified code */
+    }
+#if 1 /* modified code */
+    /* Scan through the LRU list from back to front, and flush the
+     * entries whose clear_on_unprotect flags are set.  Observe that
+     * any protected entries will not be on the LRU, and therefore
+     * will not be flushed at this time.
+     */
+
+    entries_cleared = 0;
+    entries_examined = 0;
+    initial_list_len = cache_ptr->LRU_list_len;
+    entry_ptr = cache_ptr->LRU_tail_ptr;
+
+    /* Bounding entries_examined by the initial list length guarantees
+     * termination even if the LRU were modified out from under us --
+     * which the header comment argues cannot happen here.
+     */
+    while ( ( entry_ptr != NULL ) &&
+            ( entries_examined <= initial_list_len ) &&
+            ( entries_cleared < ce_array_len ) )
+    {
+        if ( entry_ptr->clear_on_unprotect ) {
+
+            /* advance entry_ptr before the flush -- the clear-only
+             * flush destroys *clear_ptr.
+             */
+            entry_ptr->clear_on_unprotect = FALSE;
+            clear_ptr = entry_ptr;
+            entry_ptr = entry_ptr->prev;
+            entries_cleared++;
+
+            if ( H5C2_flush_single_entry(cache_ptr->f,
+                                         dxpl_id,
+                                         cache_ptr,
+                                         clear_ptr->type,
+                                         clear_ptr->addr,
+                                         H5C2__FLUSH_CLEAR_ONLY_FLAG,
+                                         TRUE) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+            }
+        } else {
+
+            entry_ptr = entry_ptr->prev;
+        }
+        entries_examined++;
+    }
+
+#if H5C2_DO_SANITY_CHECKS
+    HDassert( entries_cleared == other_entries_marked );
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+    /* It is also possible that some of the cleared entries are on the
+     * pinned list.  Must scan that also.
+     */
+
+    entry_ptr = cache_ptr->pel_head_ptr;
+
+    while ( entry_ptr != NULL )
+    {
+        if ( entry_ptr->clear_on_unprotect ) {
+
+            entry_ptr->clear_on_unprotect = FALSE;
+            clear_ptr = entry_ptr;
+            entry_ptr = entry_ptr->next;
+            entries_cleared++;
+
+            if ( H5C2_flush_single_entry(cache_ptr->f,
+                                         dxpl_id,
+                                         cache_ptr,
+                                         clear_ptr->type,
+                                         clear_ptr->addr,
+                                         H5C2__FLUSH_CLEAR_ONLY_FLAG,
+                                         TRUE) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+            }
+        } else {
+
+            entry_ptr = entry_ptr->next;
+        }
+    }
+
+#if H5C2_DO_SANITY_CHECKS
+    HDassert( entries_cleared == pinned_entries_marked + other_entries_marked );
+    HDassert( entries_cleared + protected_entries_marked == ce_array_len );
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+    /* any listed entry not cleared above must be protected, and thus
+     * still on the protected list awaiting its unprotect.
+     */
+    HDassert( ( entries_cleared == ce_array_len ) ||
+              ( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
+
+#if H5C2_DO_SANITY_CHECKS
+    /* verify the above directly: count marked entries on the protected
+     * list and check the totals balance.
+     */
+    i = 0;
+    entry_ptr = cache_ptr->pl_head_ptr;
+    while ( entry_ptr != NULL )
+    {
+        if ( entry_ptr->clear_on_unprotect ) {
+
+            i++;
+        }
+        entry_ptr = entry_ptr->next;
+    }
+    HDassert( (entries_cleared + i) == ce_array_len );
+#endif /* H5C2_DO_SANITY_CHECKS */
+#endif /* modified code */
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HDassert(0);
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_mark_entries_as_clean() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_mark_pinned_entry_dirty
+ *
+ * Purpose: Mark a pinned entry as dirty. The target entry MUST be
+ * be pinned, and MUST be unprotected.
+ *
+ * If the entry has changed size, the function updates
+ * data structures for the size change.
+ *
+ * If the entry is not already dirty, the function places
+ * the entry on the skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/22/06
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_mark_pinned_entry_dirty(H5C2_t * cache_ptr,
+                             void * thing,
+                             hbool_t size_changed,
+                             size_t new_size)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t * entry;
+
+    FUNC_ENTER_NOAPI(H5C2_mark_pinned_entry_dirty, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+    HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) );
+
+    entry = (H5C2_cache_entry_t *)thing;
+
+    /* the target must be pinned, and must not be protected */
+    if ( ! entry->is_pinned ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                    "Entry isn't pinned??")
+    }
+
+    if ( entry->is_protected ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                    "Entry is protected??")
+    }
+
+    /* dirty the entry -- a no-op if it is dirty already */
+    entry->is_dirty = TRUE;
+
+    if ( ( size_changed ) && ( entry->size != new_size ) ) {
+
+        /* Propagate the size change through the bookkeeping.  Order
+         * matters: each macro below reads the old size from
+         * entry->size, so the size field proper is updated last.
+         */
+
+        /* pinned entry list */
+        H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
+                                         (cache_ptr->pel_size), \
+                                         (entry->size), (new_size));
+
+        /* hash table */
+        H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry->size), \
+                                           (new_size));
+
+        /* skip list, if the entry is on it */
+        if ( entry->in_slist ) {
+
+            H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry->size), \
+                                               (new_size));
+        }
+
+        /* statistics -- still keyed to the old size */
+        H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry), \
+                                                 (new_size));
+
+        entry->size = new_size;
+    }
+
+    /* a dirty entry must appear on the skip list */
+    if ( ! entry->in_slist ) {
+
+        H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry, FAIL)
+    }
+
+    H5C2__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry)
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_mark_pinned_entry_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_mark_pinned_or_protected_entry_dirty
+ *
+ * Purpose: Mark a pinned or protected entry as dirty. The target entry
+ * MUST be either pinned or protected, and MAY be both.
+ *
+ * At present, this function does not support size change.
+ *
+ * In the protected case, this call is the functional
+ * equivalent of setting the H5C2__DIRTIED_FLAG on an unprotect
+ * call.
+ *
+ * In the pinned but not protected case, if the entry is not
+ * already dirty, the function marks the entry
+ * dirty and places it on the skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/15/06
+ *
+ * Modifications:
+ *
+ * JRM -- 3/29/07
+ * Added sanity check to verify that the pinned entry
+ * is not protected read only.
+ *
+ * This sanity check is commented out for now -- uncomment
+ * it once we deal with the problem of entries being protected
+ * read only, and then dirtied.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_mark_pinned_or_protected_entry_dirty(H5C2_t * cache_ptr,
+                                          void * thing)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t * entry;
+
+    FUNC_ENTER_NOAPI(H5C2_mark_pinned_or_protected_entry_dirty, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    entry = (H5C2_cache_entry_t *)thing;
+
+    /* reject entries that are neither pinned nor protected up front */
+    if ( ( ! entry->is_protected ) && ( ! entry->is_pinned ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+                    "Entry is neither pinned nor protected??")
+    }
+
+    if ( entry->is_protected ) {
+#if 1 /* JRM - uncomment this when possible */
+        HDassert( ! ((entry)->is_read_only) );
+#endif
+        /* protected case: just set the dirtied flag -- the eventual
+         * unprotect will act on it.
+         */
+        entry->dirtied = TRUE;
+
+    } else {
+
+        /* pinned but not protected: dirty the entry now, and put it
+         * on the skip list if it isn't there already.
+         */
+        entry->is_dirty = TRUE;
+
+        if ( ! entry->in_slist ) {
+
+            H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry, FAIL)
+        }
+
+        H5C2__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry)
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_mark_pinned_or_protected_entry_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_rename_entry
+ *
+ * Purpose: Use this function to notify the cache that an entry's
+ * file address changed.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM -- 6/6/05
+ * Updated function to force all renamed entries to be
+ * dirty. This is part of a series of code modifications
+ * moving management of the is_dirty field of
+ * H5C2_cache_entry_t into the H5C2 code.
+ *
+ * JRM -- 4/3/06
+ * Updated function to disallow renaming of pinned entries.
+ *
+ * JRM -- 4/27/06
+ * Updated function to support renaming of pinned entries.
+ *
+ * JRM -- 8/24/06
+ * Updated function to refrain from altering the index, the
+ * replacement policy data structures, and skip list when
+ * the function is called within the flush callback for the
+ * target entry and the target entry is being destroyed.
+ *
+ * Note that in this case H5C2_flush_single_entry() will handle
+ * all these details for us.
+ *
+ * JRM -- 10/13/07
+ * Renames of the target entry in a serialize callback are
+ * now handled by H5C2_flush_single_entry() -- hence the above
+ * modification is now obsolete.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_rename_entry(H5C2_t * cache_ptr,
+                  const H5C2_class_t * type,
+                  haddr_t old_addr,
+                  haddr_t new_addr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    hbool_t was_dirty;
+    H5C2_cache_entry_t * entry_ptr = NULL;
+    H5C2_cache_entry_t * test_entry_ptr = NULL;
+#if H5C2_DO_SANITY_CHECKS
+    /* set iff we remove the entry from the slist below, so the sanity
+     * check counters can be adjusted when it is re-inserted.
+     */
+    hbool_t removed_entry_from_slist = FALSE;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+    FUNC_ENTER_NOAPI(H5C2_rename_entry, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( type );
+    HDassert( H5F_addr_defined(old_addr) );
+    HDassert( H5F_addr_defined(new_addr) );
+    HDassert( H5F_addr_ne(old_addr, new_addr) );
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    H5C2__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
+
+    if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) {
+
+        /* the old item doesn't exist in the cache, so we are done. */
+        HGOTO_DONE(SUCCEED)
+    }
+
+    HDassert( entry_ptr->addr == old_addr );
+    HDassert( entry_ptr->type == type );
+
+    /* renaming a protected entry is an error */
+    if ( entry_ptr->is_protected ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+                    "Target entry is protected.")
+    }
+
+    /* the new address must not already be occupied in the index */
+    H5C2__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
+
+    if ( test_entry_ptr != NULL ) { /* we are hosed */
+
+        if ( test_entry_ptr->type == type ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+                        "Target already renamed & reinserted???.")
+
+        } else {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+                        "New address already in use?.")
+
+        }
+    }
+
+    /* If we get this far we have work to do.  Remove *entry_ptr from
+     * the hash table (and skip list if necessary), change its address to the
+     * new address, mark it as dirty (if it isn't already) and then re-insert.
+     *
+     * Update the replacement policy for a hit to avoid an eviction before
+     * the renamed entry is touched.  Update stats for a rename.
+     *
+     * Note that we do not check the size of the cache, or evict anything.
+     * Since this is a simple re-name, cache size should be unaffected.
+     *
+     * Check to see if the target entry is in the process of being destroyed
+     * before we delete from the index, etc.  If it is, all we do is
+     * change the addr.  If the entry is only in the process of being flushed,
+     * don't mark it as dirty either, lest we confuse the flush call back.
+     */
+
+    if ( ! ( entry_ptr->destroy_in_progress ) ) {
+
+        H5C2__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+
+        if ( entry_ptr->in_slist ) {
+
+            HDassert( cache_ptr->slist_ptr );
+
+            H5C2__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+
+#if H5C2_DO_SANITY_CHECKS
+
+            removed_entry_from_slist = TRUE;
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+        }
+    }
+
+    entry_ptr->addr = new_addr;
+
+    if ( ! ( entry_ptr->destroy_in_progress ) ) {
+
+        was_dirty = entry_ptr->is_dirty;
+
+        /* see comment above -- don't dirty a mid-flush entry */
+        if ( ! ( entry_ptr->flush_in_progress ) ) {
+
+            entry_ptr->is_dirty = TRUE;
+        }
+
+        H5C2__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+
+        if ( ! ( entry_ptr->flush_in_progress ) ) {
+
+            H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+#if H5C2_DO_SANITY_CHECKS
+
+            if ( removed_entry_from_slist ) {
+
+                /* we just removed the entry from the slist.  Thus we
+                 * must touch up cache_ptr->slist_len_increase and
+                 * cache_ptr->slist_size_increase to keep from skewing
+                 * the sanity checks.
+                 */
+                HDassert( cache_ptr->slist_len_increase > 1 );
+                HDassert( cache_ptr->slist_size_increase > entry_ptr->size );
+
+                cache_ptr->slist_len_increase -= 1;
+                cache_ptr->slist_size_increase -= entry_ptr->size;
+            }
+
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+            H5C2__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, FAIL)
+        }
+    }
+
+    H5C2__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_rename_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_resize_pinned_entry
+ *
+ * Purpose: Resize a pinned entry. The target entry MUST be
+ * be pinned, and MUST not be unprotected.
+ *
+ * Resizing an entry dirties it, so if the entry is not
+ * already dirty, the function places the entry on the
+ * skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/5/06
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_resize_pinned_entry(H5C2_t * cache_ptr,
+                         void * thing,
+                         size_t new_size)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t * entry_ptr;
+
+    FUNC_ENTER_NOAPI(H5C2_resize_pinned_entry, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    entry_ptr = (H5C2_cache_entry_t *)thing;
+
+    /* new_size is a size_t and thus unsigned -- "<= 0" could only ever
+     * be true when new_size == 0, so test for zero explicitly rather
+     * than using a misleading signed-style comparison (which some
+     * compilers warn about).
+     */
+    if ( new_size == 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                    "New size is non-positive.")
+    }
+
+    /* the target must be pinned, and must not be protected */
+    if ( ! ( entry_ptr->is_pinned ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                    "Entry isn't pinned??")
+    }
+
+    if ( entry_ptr->is_protected ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
+                    "Entry is protected??")
+    }
+
+    /* resizing dirties entries -- mark the entry as dirty if it
+     * isn't already
+     */
+    entry_ptr->is_dirty = TRUE;
+
+    /* update for change in entry size if necessary.  Order matters:
+     * each bookkeeping macro below reads the old size from
+     * entry_ptr->size, so the size field proper is updated last.
+     */
+    if ( entry_ptr->size != new_size ) {
+
+        /* update the pinned entry list */
+        H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
+                                         (cache_ptr->pel_size), \
+                                         (entry_ptr->size), (new_size));
+
+        /* update the hash table */
+        H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
+                                           (new_size));
+
+        /* if the entry is in the skip list, update that too */
+        if ( entry_ptr->in_slist ) {
+
+            H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
+                                               (new_size));
+        }
+
+        /* update statistics just before changing the entry size */
+        H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \
+                                                 (new_size));
+
+        /* finally, update the entry size proper */
+        entry_ptr->size = new_size;
+    }
+
+    /* a dirty entry must appear on the skip list */
+    if ( ! (entry_ptr->in_slist) ) {
+
+        H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+    }
+
+    H5C2__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_resize_pinned_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_pin_protected_entry()
+ *
+ * Purpose: Pin a protected cache entry. The entry must be protected
+ * at the time of call, and must be unpinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 4/26/06
+ *
+ * Modifications:
+ *
+ * JRM -- 4/26/06
+ * Modified routine to allow it to operate on protected
+ * entries.
+ *
+ * JRM -- 2/16/07
+ * Added conditional compile to avoid unused parameter
+ * warning in production compile.
+ *
+ * JRM -- 4/4/07
+ * Fixed typo -- changed macro call to
+ * H5C2__UPDATE_STATS_FOR_UNPIN to call to
+ * H5C2__UPDATE_STATS_FOR_PIN.
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C2_pin_protected_entry(H5C2_t * cache_ptr,
+                         void * thing)
+#else
+/* NOTE(review): in production (NDEBUG) builds cache_ptr is flagged
+ * UNUSED -- the HDasserts that reference it compile away, and
+ * presumably the stats macro below is also conditionally compiled;
+ * verify against H5C2pkg.h.
+ */
+herr_t
+H5C2_pin_protected_entry(H5C2_t UNUSED * cache_ptr,
+                         void * thing)
+#endif
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    H5C2_cache_entry_t * entry_ptr;
+
+    FUNC_ENTER_NOAPI(H5C2_pin_protected_entry, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    entry_ptr = (H5C2_cache_entry_t *)thing;
+
+    HDassert( H5F_addr_defined(entry_ptr->addr) );
+
+    /* only a currently protected entry may be pinned ... */
+    if ( ! ( entry_ptr->is_protected ) ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected")
+    }
+
+    /* ... and pinning it twice is an error */
+    if ( entry_ptr->is_pinned ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry is already pinned")
+    }
+
+    entry_ptr->is_pinned = TRUE;
+
+    H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_pin_protected_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_protect
+ *
+ * Purpose: If the target entry is not in the cache, load it. If
+ * necessary, attempt to evict one or more entries to keep
+ * the cache within its maximum size.
+ *
+ * Mark the target entry as protected, and return its address
+ * to the caller. The caller must call H5C2_unprotect() when
+ * finished with the entry.
+ *
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
+ * H5C2_protect. Any attempt to do so will result in a failure.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the insertion (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the
+ * metadata cache, but may not be needed elsewhere. If so,
+ * just use the same dxpl_id for both parameters.
+ *
+ * All reads are performed with the primary_dxpl_id.
+ *
+ * Similarly, the primary_dxpl_id is passed to the
+ * check_write_permitted function if it is called.
+ *
+ * Return: Success: Ptr to the desired entry
+ *
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer - 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/28/04
+ * Added code to set cache_full to TRUE whenever we try to
+ * make space in the cache.
+ *
+ * JRM -- 11/12/04
+ * Added code to call to H5C2_make_space_in_cache()
+ * after the call to H5C2__auto_adjust_cache_size() if that
+ * function sets the size_decreased flag is TRUE.
+ *
+ * JRM -- 4/25/05
+ * The size_decreased flag can also be set to TRUE in
+ * H5C2_set_cache_auto_resize_config() if a new configuration
+ * forces an immediate reduction in cache size. Modified
+ * the code to deal with this eventuality.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of H5C2_t.
+ *
+ * JRM -- 10/22/05
+ * Hand optimizations.
+ *
+ * JRM -- 5/3/06
+ * Added code to set the new dirtied field in
+ * H5C2_cache_entry_t to FALSE prior to return.
+ *
+ * JRM -- 6/23/06
+ * Modified code to allow dirty entries to be loaded from
+ * disk. This is necessary as a bug fix in the object
+ * header code requires us to modify a header as it is read.
+ *
+ * JRM -- 3/28/07
+ * Added the flags parameter and supporting code. At least
+ * for now, this parameter is used to allow the entry to
+ * be protected read only, thus allowing multiple protects.
+ *
+ * Also added code to allow multiple read only protects
+ * of cache entries.
+ *
+ * JRM -- 7/27/07
+ * Added code supporting the new evictions_enabled field
+ * in H5C2_t.
+ *
+ * JRM -- 7/11/07
+ * Revised function for the new metadata cache API. The
+ * function lost its point to H5F_t (now supplied via
+ * *cache_ptr), one of its dxpl ids. and one of the udata
+ * fields. Gained the len parameter. Also internal
+ * changes supporting the revised API.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void *
+H5C2_protect(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ size_t len,
+ const void * udata,
+ unsigned flags)
+{
+ /* const char * fcn_name = "H5C2_protect()"; */
+ hbool_t hit;
+ hbool_t first_flush;
+ hbool_t have_write_permitted = FALSE;
+ hbool_t read_only = FALSE;
+ hbool_t write_permitted;
+ hbool_t chk_len = FALSE;
+ herr_t result;
+ void * thing;
+ H5C2_cache_entry_t * entry_ptr;
+ void * ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_protect, NULL)
+
+ /* check args */
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+ HDassert( type );
+ HDassert( H5F_addr_defined(addr) );
+ HDassert( len > 0 );
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+ HDassert(0);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
+ "LRU sanity check failed.\n");
+ }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+ if ( (flags & H5C2__READ_ONLY_FLAG) != 0 )
+ {
+ read_only = TRUE;
+ }
+
+ if ( (flags & H5C2__CHECK_SIZE_FLAG) != 0 )
+ {
+ chk_len = TRUE;
+ }
+
+ /* first check to see if the target is in cache */
+ H5C2__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
+
+ if ( entry_ptr != NULL ) {
+
+ hit = TRUE;
+ thing = (void *)entry_ptr;
+
+ } else {
+
+ /* must try to load the entry from disk. */
+
+ hit = FALSE;
+
+ thing = H5C2_load_entry(cache_ptr->f, dxpl_id, type,
+ addr, len, chk_len, udata);
+
+ if ( thing == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
+ }
+
+ entry_ptr = (H5C2_cache_entry_t *)thing;
+
+ /* try to free up some space if necessary and if
+ * evictions are permitted
+ */
+ if ( ( cache_ptr->evictions_enabled ) &&
+ ( (cache_ptr->index_size + entry_ptr->size) >
+ cache_ptr->max_cache_size ) ) {
+
+ size_t space_needed;
+
+ cache_ptr->cache_full = TRUE;
+
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(cache_ptr->f,
+ dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't get write_permitted 1")
+
+ } else {
+
+ have_write_permitted = TRUE;
+
+ first_flush = TRUE;
+ }
+ } else {
+
+ write_permitted = cache_ptr->write_permitted;
+
+ have_write_permitted = TRUE;
+
+ first_flush = TRUE;
+ }
+
+ HDassert( entry_ptr->size <= H5C2_MAX_ENTRY_SIZE );
+
+ space_needed = entry_ptr->size;
+
+ if ( space_needed > cache_ptr->max_cache_size ) {
+
+ space_needed = cache_ptr->max_cache_size;
+ }
+
+ /* Note that space_needed is just the amount of space that
+ * needed to insert the new entry without exceeding the cache
+ * size limit. The subsequent call to H5C2_make_space_in_cache()
+ * may evict the entries required to free more or less space
+ * depending on conditions. It MAY be less if the cache is
+ * currently undersized, or more if the cache is oversized.
+ *
+ * The cache can exceed its maximum size limit via the following
+ * mechanisms:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Third, the user may choose to disable evictions -- causing
+ * the cache to grow without bound until evictions are
+ * re-enabled.
+ *
+ * Finally, we usually don't check to see if the cache is
+ * oversized at the end of an unprotect. As a result, it is
+ * possible to have a vastly oversized cache with no protected
+ * entries as long as all the protects precede the unprotects.
+ *
+ * Since items 1, 2, and 3 are not changing any time soon, I
+ * see no point in worrying about the fourth.
+ */
+
+ result = H5C2_make_space_in_cache(dxpl_id, cache_ptr,
+ space_needed, write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "H5C2_make_space_in_cache failed 1.")
+ }
+ }
+
+ /* Insert the entry in the hash table. It can't be dirty yet, so
+ * we don't even check to see if it should go in the skip list.
+ *
+ * This is no longer true -- due to a bug fix, we may modify
+ * data on load to repair a file.
+ */
+ H5C2__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
+
+ if ( ( entry_ptr->is_dirty ) && ( ! (entry_ptr->in_slist) ) ) {
+
+ H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
+ }
+
+ /* insert the entry in the data structures used by the replacement
+ * policy. We are just going to take it out again when we update
+ * the replacement policy for a protect, but this simplifies the
+ * code. If we do this often enough, we may want to optimize this.
+ */
+ H5C2__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
+ }
+
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->type == type );
+
+ if ( entry_ptr->is_protected ) {
+
+ if ( ( read_only ) && ( entry_ptr->is_read_only ) ) {
+
+ HDassert( entry_ptr->ro_ref_count > 0 );
+
+ (entry_ptr->ro_ref_count)++;
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Target already protected & not read only?!?.")
+ }
+ } else {
+
+ H5C2__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
+
+ entry_ptr->is_protected = TRUE;
+
+ if ( read_only ) {
+
+ entry_ptr->is_read_only = TRUE;
+ entry_ptr->ro_ref_count = 1;
+ }
+
+ entry_ptr->dirtied = FALSE;
+ }
+
+ H5C2__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
+
+ H5C2__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+
+ ret_value = thing;
+
+ if ( ( cache_ptr->evictions_enabled ) &&
+ ( ( cache_ptr->size_decreased ) ||
+ ( ( cache_ptr->resize_enabled ) &&
+ ( cache_ptr->cache_accesses >=
+ (cache_ptr->resize_ctl).epoch_length ) ) ) ) {
+
+ if ( ! have_write_permitted ) {
+
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(cache_ptr->f,
+ dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't get write_permitted 2")
+
+ } else {
+
+ have_write_permitted = TRUE;
+
+ first_flush = TRUE;
+ }
+ } else {
+
+ write_permitted = cache_ptr->write_permitted;
+
+ have_write_permitted = TRUE;
+
+ first_flush = TRUE;
+ }
+ }
+
+ if ( ( cache_ptr->resize_enabled ) &&
+ ( cache_ptr->cache_accesses >=
+ (cache_ptr->resize_ctl).epoch_length ) ) {
+
+ result = H5C2__auto_adjust_cache_size(cache_ptr,
+ dxpl_id,
+ write_permitted);
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Cache auto-resize failed.")
+ }
+ }
+
+ if ( cache_ptr->size_decreased ) {
+
+ cache_ptr->size_decreased = FALSE;
+
+ /* check to see if the cache is now oversized due to the cache
+ * size reduction. If it is, try to evict enough entries to
+ * bring the cache size down to the current maximum cache size.
+ */
+ if ( cache_ptr->index_size > cache_ptr->max_cache_size ) {
+
+ cache_ptr->cache_full = TRUE;
+
+ result = H5C2_make_space_in_cache(dxpl_id,
+ cache_ptr,
+ (size_t)0,
+ write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "H5C2_make_space_in_cache failed 2.")
+ }
+ }
+ }
+ }
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+ if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+ HDassert(0);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
+ "LRU sanity check failed.\n");
+ }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_protect() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_reset_cache_hit_rate_stats()
+ *
+ * Purpose: Reset the cache hit rate computation fields.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer, 10/5/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_reset_cache_hit_rate_stats(H5C2_t * cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_reset_cache_hit_rate_stats, FAIL)
+
+ if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+ }
+
+ cache_ptr->cache_hits = 0;
+ cache_ptr->cache_accesses = 0;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_reset_cache_hit_rate_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_set_cache_auto_resize_config
+ *
+ * Purpose: Set the cache automatic resize configuration to the
+ * provided values if they are in range, and fail if they
+ * are not.
+ *
+ * If the new configuration enables automatic cache resizing,
+ * coerce the cache max size and min clean size into agreement
+ * with the new policy and re-set the full cache hit rate
+ * stats.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 10/8/04
+ *
+ * Modifications:
+ *
+ * JRM -- 11/18/04
+ * Reworked function to match major changes in
+ * H5C2_auto_size_ctl_t.
+ *
+ * JRM -- 4/25/05
+ * Added code to set cache_ptr->size_decreased to TRUE
+ * if the new configuration forces an immediate reduction
+ * in cache size.
+ *
+ *-------------------------------------------------------------------------
+ */
+
herr_t
H5C2_set_cache_auto_resize_config(H5C2_t * cache_ptr,
                                  H5C2_auto_size_ctl_t *config_ptr)
{
    herr_t ret_value = SUCCEED;      /* Return value */
    herr_t result;
    size_t new_max_cache_size;
    size_t new_min_clean_size;

    FUNC_ENTER_NOAPI(H5C2_set_cache_auto_resize_config, FAIL)

    /* Sanity-check the arguments before doing anything else. */
    if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
    }

    if ( config_ptr == NULL ) {

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
    }

    if ( config_ptr->version != H5C2__CURR_AUTO_SIZE_CTL_VER ) {

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version.")
    }

    /* check general configuration section of the config: */
    if ( SUCCEED != H5C2_validate_resize_config(config_ptr,
                                   H5C2_RESIZE_CFG__VALIDATE_GENERAL) ) {

        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \
                    "error in general configuration fields of new config.")
    }

    /* check size increase control fields of the config: */
    if ( SUCCEED != H5C2_validate_resize_config(config_ptr,
                                   H5C2_RESIZE_CFG__VALIDATE_INCREMENT) ) {

        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \
                    "error in the size increase control fields of new config.")
    }

    /* check size decrease control fields of the config: */
    if ( SUCCEED != H5C2_validate_resize_config(config_ptr,
                                   H5C2_RESIZE_CFG__VALIDATE_DECREMENT) ) {

        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \
                    "error in the size decrease control fields of new config.")
    }

    /* check for conflicts between size increase and size decrease controls: */
    if ( SUCCEED != H5C2_validate_resize_config(config_ptr,
                                   H5C2_RESIZE_CFG__VALIDATE_INTERACTIONS) ) {

        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \
                    "conflicting threshold fields in new config.")
    }

    /* Work out whether the new config can actually cause the cache to
     * grow or shrink.  Start by assuming both are possible, then clear
     * the flags for any mode / parameter combination that makes the
     * corresponding resize a no-op.
     */
    cache_ptr->size_increase_possible = TRUE; /* will set to FALSE if needed */
    cache_ptr->size_decrease_possible = TRUE; /* will set to FALSE if needed */

    switch ( config_ptr->incr_mode )
    {
        case H5C2_incr__off:
            cache_ptr->size_increase_possible = FALSE;
            break;

        case H5C2_incr__threshold:
            /* A non-positive threshold, a growth factor of <= 1, or a
             * max increment of zero all render increases impossible.
             */
            if ( ( config_ptr->lower_hr_threshold <= 0.0 ) ||
                 ( config_ptr->increment <= 1.0 ) ||
                 ( ( config_ptr->apply_max_increment ) &&
                   ( config_ptr->max_increment <= 0 ) ) ) {

                 cache_ptr->size_increase_possible = FALSE;
            }
            break;

        default: /* should be unreachable */
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?.")
    }

    switch ( config_ptr->decr_mode )
    {
        case H5C2_decr__off:
            cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C2_decr__threshold:
            if ( ( config_ptr->upper_hr_threshold >= 1.0 ) ||
                 ( config_ptr->decrement >= 1.0 ) ||
                 ( ( config_ptr->apply_max_decrement ) &&
                   ( config_ptr->max_decrement <= 0 ) ) ) {

                cache_ptr->size_decrease_possible = FALSE;
            }
            break;

        case H5C2_decr__age_out:
            if ( ( ( config_ptr->apply_empty_reserve ) &&
                   ( config_ptr->empty_reserve >= 1.0 ) ) ||
                 ( ( config_ptr->apply_max_decrement ) &&
                   ( config_ptr->max_decrement <= 0 ) ) ) {

                cache_ptr->size_decrease_possible = FALSE;
            }
            break;

        case H5C2_decr__age_out_with_threshold:
            if ( ( ( config_ptr->apply_empty_reserve ) &&
                   ( config_ptr->empty_reserve >= 1.0 ) ) ||
                 ( ( config_ptr->apply_max_decrement ) &&
                   ( config_ptr->max_decrement <= 0 ) ) ||
                 ( config_ptr->upper_hr_threshold >= 1.0 ) ) {

                cache_ptr->size_decrease_possible = FALSE;
            }
            break;

        default: /* should be unreachable */
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?.")
    }

    /* If max_size == min_size, the cache size is pinned and can move
     * in neither direction regardless of mode.
     */
    if ( config_ptr->max_size == config_ptr->min_size ) {

        cache_ptr->size_increase_possible = FALSE;
        cache_ptr->size_decrease_possible = FALSE;
    }

    cache_ptr->resize_enabled = cache_ptr->size_increase_possible ||
                                cache_ptr->size_decrease_possible;

    cache_ptr->resize_ctl = *config_ptr;

    /* Resize the cache to the supplied initial value if requested, or as
     * necessary to force it within the bounds of the current automatic
     * cache resizing configuration.
     *
     * Note that the min_clean_fraction may have changed, so we
     * go through the exercise even if the current size is within
     * range and an initial size has not been provided.
     */
    if ( (cache_ptr->resize_ctl).set_initial_size ) {

        new_max_cache_size = (cache_ptr->resize_ctl).initial_size;
    }
    else if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).max_size ) {

        new_max_cache_size = (cache_ptr->resize_ctl).max_size;
    }
    else if ( cache_ptr->max_cache_size < (cache_ptr->resize_ctl).min_size ) {

        new_max_cache_size = (cache_ptr->resize_ctl).min_size;

    } else {

        new_max_cache_size = cache_ptr->max_cache_size;
    }

    /* Re-derive the min clean size from the (possibly new) max cache
     * size and the new min_clean_fraction.
     */
    new_min_clean_size = (size_t)
                         ((double)new_max_cache_size *
                          ((cache_ptr->resize_ctl).min_clean_fraction));


    /* since new_min_clean_size is of type size_t, we have
     *
     *     ( 0 <= new_min_clean_size )
     *
     * by definition.
     */
    HDassert( new_min_clean_size <= new_max_cache_size );
    HDassert( (cache_ptr->resize_ctl).min_size <= new_max_cache_size );
    HDassert( new_max_cache_size <= (cache_ptr->resize_ctl).max_size );

    /* Flag an immediate size reduction so the next protect call knows
     * to evict entries down to the new maximum.
     */
    if ( new_max_cache_size < cache_ptr->max_cache_size ) {

        cache_ptr->size_decreased = TRUE;
    }

    cache_ptr->max_cache_size = new_max_cache_size;
    cache_ptr->min_clean_size = new_min_clean_size;

    /* The hit rate stats must be re-started under the new epoch regime. */
    if ( H5C2_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {

        /* this should be impossible... */
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
                    "H5C2_reset_cache_hit_rate_stats failed.")
    }

    /* remove excess epoch markers if any */
    if ( ( config_ptr->decr_mode == H5C2_decr__age_out_with_threshold ) ||
         ( config_ptr->decr_mode == H5C2_decr__age_out ) ) {

        if ( cache_ptr->epoch_markers_active >
             (cache_ptr->resize_ctl).epochs_before_eviction ) {

            result =
                H5C2__autoadjust__ageout__remove_excess_markers(cache_ptr);

            if ( result != SUCCEED ) {

                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
                            "can't remove excess epoch markers.")
            }
        }
    } else if ( cache_ptr->epoch_markers_active > 0 ) {

        /* Age-out is no longer in use -- clear all epoch markers. */
        result = H5C2__autoadjust__ageout__remove_all_markers(cache_ptr);

        if ( result != SUCCEED ) {

            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
                        "error removing all epoch markers.")
        }
    }

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C2_set_cache_auto_resize_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_set_evictions_enabled()
+ *
+ * Purpose: Set cache_ptr->evictions_enabled to the value of the
+ * evictions enabled parameter.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 7/27/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_set_evictions_enabled(H5C2_t * cache_ptr,
+ hbool_t evictions_enabled)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_set_evictions_enabled, FAIL)
+
+ if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+ }
+
+ if ( ( evictions_enabled != TRUE ) && ( evictions_enabled != FALSE ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Bad evictions_enabled on entry.")
+ }
+
+ /* There is no fundamental reason why we should not permit
+ * evictions to be disabled while automatic resize is enabled.
+ * However, I can't think of any good reason why one would
+ * want to, and allowing it would greatly complicate testing
+ * the feature. Hence the following:
+ */
+ if ( ( evictions_enabled != TRUE ) &&
+ ( ( cache_ptr->resize_ctl.incr_mode != H5C2_incr__off ) ||
+ ( cache_ptr->resize_ctl.decr_mode != H5C2_decr__off ) ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Can't disable evictions when auto resize enabled.")
+ }
+
+ cache_ptr->evictions_enabled = evictions_enabled;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_set_evictions_enabled() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_set_prefix
+ *
+ * Purpose: Set the values of the prefix field of H5C2_t. This
+ *		filed is used to label some debugging output. (typo: "filed" should read "field")
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 1/20/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_set_prefix(H5C2_t * cache_ptr,
+ char * prefix)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_set_prefix, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+ }
+
+ HDassert( prefix );
+ HDassert( HDstrlen(prefix) < H5C2__PREFIX_LEN ) ;
+
+ HDstrcpy(&(cache_ptr->prefix[0]), prefix);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_set_prefix() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_set_skip_flags
+ *
+ * Purpose: Set the values of the skip sanity check flags.
+ *
+ * This function and the skip sanity check flags were created
+ * for the convenience of the test bed. However it is
+ * possible that there may be other uses for the flags.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_set_skip_flags(H5C2_t * cache_ptr,
+ hbool_t skip_file_checks,
+ hbool_t skip_dxpl_id_checks)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_set_skip_flags, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+ }
+
+ cache_ptr->skip_file_checks = skip_file_checks;
+ cache_ptr->skip_dxpl_id_checks = skip_dxpl_id_checks;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_set_skip_flags() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_set_trace_file_ptr
+ *
+ * Purpose: Set the trace_file_ptr field for the cache.
+ *
+ *		This field must either be NULL (which turns off trace
+ * file logging), or be a pointer to an open file to which
+ * trace file data is to be written.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 1/20/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_set_trace_file_ptr(H5C2_t * cache_ptr,
+ FILE * trace_file_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C2_set_trace_file_ptr, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+ }
+
+ cache_ptr->trace_file_ptr = trace_file_ptr;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_set_trace_file_ptr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_stats
+ *
+ * Purpose: Prints statistics about the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM -- 9/8/05
+ * Updated function for the addition of cache entry size
+ * change statistics.
+ *
+ * JRM -- 1/13/06
+ * Added code to use the prefix field of H5C2_t to allow
+ * tagging of statistics output.
+ *
+ * JRM -- 3/21/06
+ * Added code supporting the pinned entry related stats.
+ *
+ * JRM -- 8/9/06
+ * More code supporting pinned entry related stats.
+ *
+ * JRM -- 8/23/06
+ * Added code supporting new flush related statistics.
+ *
+ * JRM -- 3/31/07
+ * Added code supporting the new write_protects,
+ * read_protects, and max_read_protects fields.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_stats(H5C2_t * cache_ptr,
+ const char * cache_name,
+ hbool_t
+#if !H5C2_COLLECT_CACHE_STATS
+ UNUSED
+#endif /* H5C2_COLLECT_CACHE_STATS */
+ display_detailed_stats)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+#if H5C2_COLLECT_CACHE_STATS
+ int i;
+ int64_t total_hits = 0;
+ int64_t total_misses = 0;
+ int64_t total_write_protects = 0;
+ int64_t total_read_protects = 0;
+ int64_t max_read_protects = 0;
+ int64_t total_insertions = 0;
+ int64_t total_pinned_insertions = 0;
+ int64_t total_clears = 0;
+ int64_t total_flushes = 0;
+ int64_t total_evictions = 0;
+ int64_t total_renames = 0;
+ int64_t total_entry_flush_renames = 0;
+ int64_t total_cache_flush_renames = 0;
+ int64_t total_size_increases = 0;
+ int64_t total_size_decreases = 0;
+ int64_t total_entry_flush_size_changes = 0;
+ int64_t total_cache_flush_size_changes = 0;
+ int64_t total_pins = 0;
+ int64_t total_unpins = 0;
+ int64_t total_dirty_pins = 0;
+ int64_t total_pinned_flushes = 0;
+ int64_t total_pinned_clears = 0;
+ int32_t aggregate_max_accesses = 0;
+ int32_t aggregate_min_accesses = 1000000;
+ int32_t aggregate_max_clears = 0;
+ int32_t aggregate_max_flushes = 0;
+ size_t aggregate_max_size = 0;
+ int32_t aggregate_max_pins = 0;
+ double hit_rate;
+ double average_successful_search_depth = 0.0;
+ double average_failed_search_depth = 0.0;
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+ FUNC_ENTER_NOAPI(H5C2_stats, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) ||
+ ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+ ( !cache_name ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or cache_name")
+ }
+
+#if H5C2_COLLECT_CACHE_STATS
+
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ ) {
+
+ total_hits += cache_ptr->hits[i];
+ total_misses += cache_ptr->misses[i];
+ total_write_protects += cache_ptr->write_protects[i];
+ total_read_protects += cache_ptr->read_protects[i];
+ if ( max_read_protects < cache_ptr->max_read_protects[i] ) {
+ max_read_protects = cache_ptr->max_read_protects[i];
+ }
+ total_insertions += cache_ptr->insertions[i];
+ total_pinned_insertions += cache_ptr->pinned_insertions[i];
+ total_clears += cache_ptr->clears[i];
+ total_flushes += cache_ptr->flushes[i];
+ total_evictions += cache_ptr->evictions[i];
+ total_renames += cache_ptr->renames[i];
+ total_entry_flush_renames
+ += cache_ptr->entry_flush_renames[i];
+ total_cache_flush_renames
+ += cache_ptr->cache_flush_renames[i];
+ total_size_increases += cache_ptr->size_increases[i];
+ total_size_decreases += cache_ptr->size_decreases[i];
+ total_entry_flush_size_changes
+ += cache_ptr->entry_flush_size_changes[i];
+ total_cache_flush_size_changes
+ += cache_ptr->cache_flush_size_changes[i];
+ total_pins += cache_ptr->pins[i];
+ total_unpins += cache_ptr->unpins[i];
+ total_dirty_pins += cache_ptr->dirty_pins[i];
+ total_pinned_flushes += cache_ptr->pinned_flushes[i];
+ total_pinned_clears += cache_ptr->pinned_clears[i];
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+ if ( aggregate_max_accesses < cache_ptr->max_accesses[i] )
+ aggregate_max_accesses = cache_ptr->max_accesses[i];
+ if ( aggregate_min_accesses > aggregate_max_accesses )
+ aggregate_min_accesses = aggregate_max_accesses;
+ if ( aggregate_min_accesses > cache_ptr->min_accesses[i] )
+ aggregate_min_accesses = cache_ptr->min_accesses[i];
+ if ( aggregate_max_clears < cache_ptr->max_clears[i] )
+ aggregate_max_clears = cache_ptr->max_clears[i];
+ if ( aggregate_max_flushes < cache_ptr->max_flushes[i] )
+ aggregate_max_flushes = cache_ptr->max_flushes[i];
+ if ( aggregate_max_size < cache_ptr->max_size[i] )
+ aggregate_max_size = cache_ptr->max_size[i];
+ if ( aggregate_max_pins < cache_ptr->max_pins[i] )
+ aggregate_max_pins = cache_ptr->max_pins[i];
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+ }
+
+ if ( ( total_hits > 0 ) || ( total_misses > 0 ) ) {
+
+ hit_rate = 100.0 * ((double)(total_hits)) /
+ ((double)(total_hits + total_misses));
+ } else {
+ hit_rate = 0.0;
+ }
+
+ if ( cache_ptr->successful_ht_searches > 0 ) {
+
+ average_successful_search_depth =
+ ((double)(cache_ptr->total_successful_ht_search_depth)) /
+ ((double)(cache_ptr->successful_ht_searches));
+ }
+
+ if ( cache_ptr->failed_ht_searches > 0 ) {
+
+ average_failed_search_depth =
+ ((double)(cache_ptr->total_failed_ht_search_depth)) /
+ ((double)(cache_ptr->failed_ht_searches));
+ }
+
+
+ HDfprintf(stdout, "\n%sH5C2: cache statistics for %s\n",
+ cache_ptr->prefix, cache_name);
+
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout,
+ "%s hash table insertion / deletions = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->total_ht_insertions),
+ (long)(cache_ptr->total_ht_deletions));
+
+ HDfprintf(stdout,
+ "%s HT successful / failed searches = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->successful_ht_searches),
+ (long)(cache_ptr->failed_ht_searches));
+
+ HDfprintf(stdout,
+ "%s Av. HT suc / failed search depth = %f / %f\n",
+ cache_ptr->prefix,
+ average_successful_search_depth,
+ average_failed_search_depth);
+
+ HDfprintf(stdout,
+ "%s current (max) index size / length = %ld (%ld) / %ld (%ld)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->index_size),
+ (long)(cache_ptr->max_index_size),
+ (long)(cache_ptr->index_len),
+ (long)(cache_ptr->max_index_len));
+
+ HDfprintf(stdout,
+ "%s current (max) slist size / length = %ld (%ld) / %ld (%ld)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->slist_size),
+ (long)(cache_ptr->max_slist_size),
+ (long)(cache_ptr->slist_len),
+ (long)(cache_ptr->max_slist_len));
+
+ HDfprintf(stdout,
+ "%s current (max) PL size / length = %ld (%ld) / %ld (%ld)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->pl_size),
+ (long)(cache_ptr->max_pl_size),
+ (long)(cache_ptr->pl_len),
+ (long)(cache_ptr->max_pl_len));
+
+ HDfprintf(stdout,
+ "%s current (max) PEL size / length = %ld (%ld) / %ld (%ld)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->pel_size),
+ (long)(cache_ptr->max_pel_size),
+ (long)(cache_ptr->pel_len),
+ (long)(cache_ptr->max_pel_len));
+
+ HDfprintf(stdout,
+ "%s current LRU list size / length = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->LRU_list_size),
+ (long)(cache_ptr->LRU_list_len));
+
+ HDfprintf(stdout,
+ "%s current clean LRU size / length = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->cLRU_list_size),
+ (long)(cache_ptr->cLRU_list_len));
+
+ HDfprintf(stdout,
+ "%s current dirty LRU size / length = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->dLRU_list_size),
+ (long)(cache_ptr->dLRU_list_len));
+
+ HDfprintf(stdout,
+ "%s Total hits / misses / hit_rate = %ld / %ld / %f\n",
+ cache_ptr->prefix,
+ (long)total_hits,
+ (long)total_misses,
+ hit_rate);
+
+ HDfprintf(stdout,
+ "%s Total write / read (max) protects = %ld / %ld (%d)\n",
+ cache_ptr->prefix,
+ (long)total_write_protects,
+ (long)total_read_protects,
+ max_read_protects);
+
+ HDfprintf(stdout,
+ "%s Total clears / flushes / evictions = %ld / %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_clears,
+ (long)total_flushes,
+ (long)total_evictions);
+
+ HDfprintf(stdout,
+ "%s Total insertions(pinned) / renames = %ld(%ld) / %ld\n",
+ cache_ptr->prefix,
+ (long)total_insertions,
+ (long)total_pinned_insertions,
+ (long)total_renames);
+
+ HDfprintf(stdout,
+ "%s Total entry / cache flush renames = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_entry_flush_renames,
+ (long)total_cache_flush_renames);
+
+ HDfprintf(stdout, "%s Total entry size incrs / decrs = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_size_increases,
+ (long)total_size_decreases);
+
+ HDfprintf(stdout, "%s Ttl entry/cache flush size changes = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_entry_flush_size_changes,
+ (long)total_cache_flush_size_changes);
+
+ HDfprintf(stdout,
+ "%s Total entry pins (dirty) / unpins = %ld (%ld) / %ld\n",
+ cache_ptr->prefix,
+ (long)total_pins,
+ (long)total_dirty_pins,
+ (long)total_unpins);
+
+ HDfprintf(stdout, "%s Total pinned flushes / clears = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_pinned_flushes,
+ (long)total_pinned_clears);
+
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+
+ HDfprintf(stdout, "%s aggregate max / min accesses = %d / %d\n",
+ cache_ptr->prefix,
+ (int)aggregate_max_accesses,
+ (int)aggregate_min_accesses);
+
+ HDfprintf(stdout, "%s aggregate max_clears / max_flushes = %d / %d\n",
+ cache_ptr->prefix,
+ (int)aggregate_max_clears,
+ (int)aggregate_max_flushes);
+
+ HDfprintf(stdout, "%s aggregate max_size / max_pins = %d / %d\n",
+ cache_ptr->prefix,
+ (int)aggregate_max_size,
+ (int)aggregate_max_pins);
+
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+ if ( display_detailed_stats )
+ {
+
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ ) {
+
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout, "%s Stats on %s:\n",
+ cache_ptr->prefix,
+ ((cache_ptr->type_name_table_ptr))[i]);
+
+ if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) {
+
+ hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) /
+ ((double)(cache_ptr->hits[i] + cache_ptr->misses[i]));
+ } else {
+ hit_rate = 0.0;
+ }
+
+ HDfprintf(stdout,
+ "%s hits / misses / hit_rate = %ld / %ld / %f\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->hits[i]),
+ (long)(cache_ptr->misses[i]),
+ hit_rate);
+
+ HDfprintf(stdout,
+ "%s write / read (max) protects = %ld / %ld (%d)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->write_protects[i]),
+ (long)(cache_ptr->read_protects[i]),
+ (int)(cache_ptr->max_read_protects[i]));
+
+ HDfprintf(stdout,
+ "%s clears / flushes / evictions = %ld / %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->clears[i]),
+ (long)(cache_ptr->flushes[i]),
+ (long)(cache_ptr->evictions[i]));
+
+ HDfprintf(stdout,
+ "%s insertions(pinned) / renames = %ld(%ld) / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->insertions[i]),
+ (long)(cache_ptr->pinned_insertions[i]),
+ (long)(cache_ptr->renames[i]));
+
+ HDfprintf(stdout,
+ "%s entry / cache flush renames = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->entry_flush_renames[i]),
+ (long)(cache_ptr->cache_flush_renames[i]));
+
+ HDfprintf(stdout,
+ "%s size increases / decreases = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->size_increases[i]),
+ (long)(cache_ptr->size_decreases[i]));
+
+ HDfprintf(stdout,
+ "%s entry/cache flush size changes = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->entry_flush_size_changes[i]),
+ (long)(cache_ptr->cache_flush_size_changes[i]));
+
+
+ HDfprintf(stdout,
+ "%s entry pins / unpins = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->pins[i]),
+ (long)(cache_ptr->unpins[i]));
+
+ HDfprintf(stdout,
+ "%s entry dirty pins/pin'd flushes = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->dirty_pins[i]),
+ (long)(cache_ptr->pinned_flushes[i]));
+
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+
+ HDfprintf(stdout,
+ "%s entry max / min accesses = %d / %d\n",
+ cache_ptr->prefix,
+ cache_ptr->max_accesses[i],
+ cache_ptr->min_accesses[i]);
+
+ HDfprintf(stdout,
+ "%s entry max_clears / max_flushes = %d / %d\n",
+ cache_ptr->prefix,
+ cache_ptr->max_clears[i],
+ cache_ptr->max_flushes[i]);
+
+ HDfprintf(stdout,
+ "%s entry max_size / max_pins = %d / %d\n",
+ cache_ptr->prefix,
+ (int)(cache_ptr->max_size[i]),
+ (int)(cache_ptr->max_pins[i]));
+
+
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+ }
+ }
+
+ HDfprintf(stdout, "\n");
+
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_stats() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_stats__reset
+ *
+ * Purpose: Reset the stats fields to their initial values.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer, 4/28/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/21/04
+ * Updated for hash table related statistics.
+ *
+ * JRM - 9/8/05
+ * Updated for size increase / decrease statistics.
+ *
+ * JRM - 3/20/06
+ * Updated for pin / unpin related statistics.
+ *
+ * JRM - 8/9/06
+ * Further updates for pin related statistics.
+ *
+ * JRM 8/23/06
+ * Added initialization code for new flush related statistics.
+ *
+ * JRM 2/16/07
+ * Added conditional compile code to avoid unused parameter
+ * warning in the production build.
+ *
+ * JRM 3/31/07
+ * Added initialization for the new write_protects,
+ * read_protects, and max_read_protects fields.
+ *
+ *-------------------------------------------------------------------------
+ */
+
void
#ifndef NDEBUG
H5C2_stats__reset(H5C2_t * cache_ptr)
#else /* NDEBUG */
#if H5C2_COLLECT_CACHE_STATS
H5C2_stats__reset(H5C2_t * cache_ptr)
#else /* H5C2_COLLECT_CACHE_STATS */
H5C2_stats__reset(H5C2_t UNUSED * cache_ptr)
#endif /* H5C2_COLLECT_CACHE_STATS */
#endif /* NDEBUG */
{
#if H5C2_COLLECT_CACHE_STATS
    int i;
#endif /* H5C2_COLLECT_CACHE_STATS */

    /* cache_ptr is used by the HDasserts even when stats collection is
     * compiled out; the UNUSED tag above applies only to production
     * builds with stats disabled.
     */
    HDassert( cache_ptr );
    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );

#if H5C2_COLLECT_CACHE_STATS
    /* Zero the per-entry-type statistics arrays. */
    for ( i = 0; i <= cache_ptr->max_type_id; i++ )
    {
        cache_ptr->hits[i]                      = 0;
        cache_ptr->misses[i]                    = 0;
        cache_ptr->write_protects[i]            = 0;
        cache_ptr->read_protects[i]             = 0;
        cache_ptr->max_read_protects[i]         = 0;
        cache_ptr->insertions[i]                = 0;
        cache_ptr->pinned_insertions[i]         = 0;
        cache_ptr->clears[i]                    = 0;
        cache_ptr->flushes[i]                   = 0;
        cache_ptr->evictions[i]                 = 0;
        cache_ptr->renames[i]                   = 0;
        cache_ptr->entry_flush_renames[i]       = 0;
        cache_ptr->cache_flush_renames[i]       = 0;
        cache_ptr->pins[i]                      = 0;
        cache_ptr->unpins[i]                    = 0;
        cache_ptr->dirty_pins[i]                = 0;
        cache_ptr->pinned_flushes[i]            = 0;
        cache_ptr->pinned_clears[i]             = 0;
        cache_ptr->size_increases[i]            = 0;
        cache_ptr->size_decreases[i]            = 0;
        cache_ptr->entry_flush_size_changes[i]  = 0;
        cache_ptr->cache_flush_size_changes[i]  = 0;
    }

    /* Zero the hash table statistics. */
    cache_ptr->total_ht_insertions              = 0;
    cache_ptr->total_ht_deletions               = 0;
    cache_ptr->successful_ht_searches           = 0;
    cache_ptr->total_successful_ht_search_depth = 0;
    cache_ptr->failed_ht_searches               = 0;
    cache_ptr->total_failed_ht_search_depth     = 0;

    /* Zero the high-water marks of the index, skip list, protected
     * list, and pinned entry list.
     */
    cache_ptr->max_index_len                    = 0;
    cache_ptr->max_index_size                   = (size_t)0;

    cache_ptr->max_slist_len                    = 0;
    cache_ptr->max_slist_size                   = (size_t)0;

    cache_ptr->max_pl_len                       = 0;
    cache_ptr->max_pl_size                      = (size_t)0;

    cache_ptr->max_pel_len                      = 0;
    cache_ptr->max_pel_size                     = (size_t)0;

#if H5C2_COLLECT_CACHE_ENTRY_STATS

    /* Reset the per-type entry extremes.  min_accesses starts at a large
     * sentinel so the first observed value replaces it.
     */
    for ( i = 0; i <= cache_ptr->max_type_id; i++ )
    {
        cache_ptr->max_accesses[i]              = 0;
        cache_ptr->min_accesses[i]              = 1000000;
        cache_ptr->max_clears[i]                = 0;
        cache_ptr->max_flushes[i]               = 0;
        cache_ptr->max_size[i]                  = (size_t)0;
        cache_ptr->max_pins[i]                  = 0;
    }

#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
#endif /* H5C2_COLLECT_CACHE_STATS */

    return;

} /* H5C2_stats__reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_unpin_entry()
+ *
+ * Purpose: Unpin a cache entry. The entry must be unprotected at
+ * the time of call, and must be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/22/06
+ *
+ * Modifications:
+ *
+ * JRM -- 4/26/06
+ * Modified routine to allow it to operate on protected
+ * entries.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_unpin_entry(H5C2_t * cache_ptr,
+                 void * thing)
+{
+    H5C2_cache_entry_t * entry_ptr;
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_unpin_entry, FAIL)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( thing );
+
+    entry_ptr = (H5C2_cache_entry_t *)thing;
+
+    /* Refuse to unpin an entry that isn't pinned in the first place. */
+    if ( !entry_ptr->is_pinned ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Entry isn't pinned")
+    }
+
+    /* Only update the replacement policy if the entry is unprotected --
+     * protected entries are handled separately from the replacement
+     * policy data structures.
+     */
+    if ( !entry_ptr->is_protected ) {
+
+        H5C2__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
+    }
+
+    entry_ptr->is_pinned = FALSE;
+
+    H5C2__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_unpin_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_unprotect
+ *
+ * Purpose: Undo an H5C2_protect() call -- specifically, mark the
+ * entry as unprotected, remove it from the protected list,
+ * and give it back to the replacement policy.
+ *
+ * The TYPE and ADDR arguments must be the same as those in
+ * the corresponding call to H5C2_protect() and the THING
+ * argument must be the value returned by that call to
+ * H5C2_protect().
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the unprotect (primary_dxpl_id), and on all subsequent
+ *		writes (secondary_dxpl_id).  Since an unprotect cannot
+ * occasion a write at present, all this is moot for now.
+ * However, things change, and in any case,
+ * H5C2_flush_single_entry() needs primary_dxpl_id and
+ * secondary_dxpl_id in its parameter list.
+ *
+ * The function can't cause a read either, so the dxpl_id
+ * parameters are moot in this case as well.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * If the deleted flag is TRUE, simply remove the target entry
+ * from the cache, clear it, and free it without writing it to
+ * disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated the function for the addition of the hash table.
+ * In particular, we now add dirty entries to the tree if
+ * they aren't in the tree already.
+ *
+ * JRM -- 1/6/05
+ * Added the flags parameter, and code supporting
+ * H5C2__SET_FLUSH_MARKER_FLAG. Note that this flag is
+ * ignored unless the new entry is dirty. Also note that
+ * once the flush_marker field of an entry is set, the
+ * only way it can be reset is by being flushed.
+ *
+ * JRM -- 6/3/05
+ * Added the dirtied parameter and supporting code. This
+ * is part of an effort to move management of the is_dirty
+ * field into the cache code. This has become necessary
+ * to repair a cache coherency bug in PHDF5.
+ *
+ * JRM -- 7/5/05
+ * Added code supporting the new clear_on_unprotect field
+ * of H5C2_cache_entry_t. This change is also part of the
+ * above mentioned cache coherency bug fix in PHDF5.
+ *
+ * JRM -- 9/8/05
+ * Added the size_changed and new_size parameters and the
+ * supporting code. Since the metadata cache synchronizes
+ * on dirty bytes creation in the PHDF5 case, we must now
+ * track changes in entry size.
+ *
+ * Note that the new_size parameter is ignored unless the
+ * size_changed parameter is TRUE. In this case, the new_size
+ * must be positive.
+ *
+ * Also observe that if size_changed is TRUE, dirtied must be
+ * TRUE.
+ *
+ * JRM -- 9/23/05
+ * Moved the size_changed parameter into flags.
+ *
+ * JRM -- 3/21/06
+ *		Updated function to pin and unpin entries as directed via
+ * the new H5C2__PIN_ENTRY_FLAG and H5C2__UNPIN_ENTRY_FLAG flags.
+ *
+ * JRM -- 5/3/06
+ * Added code to make use of the new dirtied field in
+ * H5C2_cache_entry_t. If this field is TRUE, it is the
+ * equivalent of setting the H5C2__DIRTIED_FLAG.
+ *
+ * JRM -- 3/29/07
+ * Modified function to allow a entry to be protected
+ * more than once if the entry is protected read only.
+ *
+ * Also added sanity checks using the new is_read_only and
+ * ro_ref_count parameters.
+ *
+ * JRM -- 9/8/07
+ * Revised function for the new metadata cache API. The
+ * function lost its pointer to H5F_t (now supplied via
+ * *cache_ptr), and one of its dxpl ids. Also internal
+ * changes supporting the revised API.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_unprotect(H5C2_t * cache_ptr,
+               hid_t dxpl_id,
+               const H5C2_class_t * type,
+               haddr_t addr,
+               void * thing,
+               unsigned int flags,
+               size_t new_size)
+{
+    /* const char * fcn_name = "H5C2_unprotect()"; */
+    hbool_t deleted;
+    hbool_t dirtied;
+    hbool_t set_flush_marker;
+    hbool_t size_changed;
+    hbool_t pin_entry;
+    hbool_t unpin_entry;
+#ifdef H5_HAVE_PARALLEL
+    hbool_t clear_entry = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+    herr_t ret_value = SUCCEED; /* Return value */
+    H5C2_cache_entry_t * entry_ptr;
+    H5C2_cache_entry_t * test_entry_ptr;
+
+    FUNC_ENTER_NOAPI(H5C2_unprotect, FAIL)
+
+    /* decode the flags parameter into individual booleans */
+    deleted = ( (flags & H5C2__DELETED_FLAG) != 0 );
+    dirtied = ( (flags & H5C2__DIRTIED_FLAG) != 0 );
+    set_flush_marker = ( (flags & H5C2__SET_FLUSH_MARKER_FLAG) != 0 );
+    size_changed = ( (flags & H5C2__SIZE_CHANGED_FLAG) != 0 );
+    pin_entry = ( (flags & H5C2__PIN_ENTRY_FLAG) != 0 );
+    unpin_entry = ( (flags & H5C2__UNPIN_ENTRY_FLAG) != 0 );
+
+    /* Changing the size of an entry dirties it. Thus, set the
+     * dirtied flag if the size_changed flag is set.
+     */
+
+    dirtied |= size_changed;
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+    HDassert( type );
+    HDassert( H5F_addr_defined(addr) );
+    HDassert( thing );
+    HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) );
+    HDassert( ( ! size_changed ) || ( dirtied ) );
+    HDassert( ( ! size_changed ) || ( new_size > 0 ) );
+    HDassert( ! ( pin_entry && unpin_entry ) );
+
+    entry_ptr = (H5C2_cache_entry_t *)thing;
+
+    /* thing must be the same entry that was handed out by the matching
+     * H5C2_protect() call.
+     */
+    HDassert( entry_ptr->addr == addr );
+    HDassert( entry_ptr->type == type );
+
+    /* also set the dirtied variable if the dirtied field is set in
+     * the entry.
+     */
+    dirtied |= entry_ptr->dirtied;
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+
+    /* if the entry has multiple read only protects, just decrement
+     * the ro_ref_counter. Don't actually unprotect until the ref count
+     * drops to zero.
+     */
+    if ( entry_ptr->ro_ref_count > 1 ) {
+
+        HDassert( entry_ptr->is_protected );
+        HDassert( entry_ptr->is_read_only );
+
+        /* a read-only protected entry must not be dirtied */
+        if ( dirtied ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                        "Read only entry modified(1)??")
+        }
+
+        (entry_ptr->ro_ref_count)--;
+
+        /* Pin or unpin the entry as requested. */
+        if ( pin_entry ) {
+
+            if ( entry_ptr->is_pinned ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
+                            "Entry already pinned???")
+            }
+            entry_ptr->is_pinned = TRUE;
+            H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+
+        } else if ( unpin_entry ) {
+
+            if ( ! ( entry_ptr->is_pinned ) ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \
+                            "Entry already unpinned???")
+            }
+            entry_ptr->is_pinned = FALSE;
+            H5C2__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+        }
+
+    } else {
+
+        /* this is either the last read-only unprotect, or a regular
+         * (read/write) unprotect -- actually release the entry.
+         */
+        if ( entry_ptr->is_read_only ) {
+
+            HDassert( entry_ptr->ro_ref_count == 1 );
+
+            if ( dirtied ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                            "Read only entry modified(2)??")
+            }
+
+            entry_ptr->is_read_only = FALSE;
+            entry_ptr->ro_ref_count = 0;
+        }
+
+#ifdef H5_HAVE_PARALLEL
+        /* When the H5C2 code is used to implement the metadata cache in the
+         * PHDF5 case, only the cache on process 0 is allowed to write to file.
+         * All the other metadata caches must hold dirty entries until they
+         * are told that the entries are clean.
+         *
+         * The clear_on_unprotect flag in the H5C2_cache_entry_t structure
+         * exists to deal with the case in which an entry is protected when
+         * its cache receives word that the entry is now clean. In this case,
+         * the clear_on_unprotect flag is set, and the entry is flushed with
+         * the H5C2__FLUSH_CLEAR_ONLY_FLAG.
+         *
+         * All this is a bit awkward, but until the metadata cache entries
+         * are contiguous, with only one dirty flag, we have to let the supplied
+         * functions deal with resetting the is_dirty flag.
+         */
+        if ( entry_ptr->clear_on_unprotect ) {
+
+            HDassert( entry_ptr->is_dirty );
+
+            entry_ptr->clear_on_unprotect = FALSE;
+
+            if ( ! dirtied ) {
+
+                clear_entry = TRUE;
+            }
+        }
+#endif /* H5_HAVE_PARALLEL */
+
+        if ( ! (entry_ptr->is_protected) ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                        "Entry already unprotected??")
+        }
+
+        /* mark the entry as dirty if appropriate */
+        entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied );
+
+        /* update for change in entry size if necessary */
+        if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) {
+
+            /* update the protected list */
+            H5C2__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \
+                                             (cache_ptr->pl_size), \
+                                             (entry_ptr->size), (new_size));
+
+            /* update the hash table */
+            H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
+                                               (new_size));
+
+            /* if the entry is in the skip list, update that too */
+            if ( entry_ptr->in_slist ) {
+
+                H5C2__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), \
+                                                   (entry_ptr->size),\
+                                                   (new_size));
+            }
+
+            /* update statistics just before changing the entry size */
+            H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \
+                                                     (new_size));
+
+            /* finally, update the entry size proper */
+            entry_ptr->size = new_size;
+        }
+
+        /* Pin or unpin the entry as requested. */
+        if ( pin_entry ) {
+
+            if ( entry_ptr->is_pinned ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
+                            "Entry already pinned???")
+            }
+            entry_ptr->is_pinned = TRUE;
+            H5C2__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+
+        } else if ( unpin_entry ) {
+
+            if ( ! ( entry_ptr->is_pinned ) ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \
+                            "Entry already unpinned???")
+            }
+            entry_ptr->is_pinned = FALSE;
+            H5C2__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+        }
+
+        /* H5C2__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
+         * the pinned entry list if entry_ptr->is_pinned is TRUE.
+         */
+        H5C2__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)
+
+        entry_ptr->is_protected = FALSE;
+
+        /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
+         * and then add it to the skip list if it isn't there already.
+         */
+
+        if ( entry_ptr->is_dirty ) {
+
+            entry_ptr->flush_marker |= set_flush_marker;
+
+            if ( ! (entry_ptr->in_slist) ) {
+
+                H5C2__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+            }
+        }
+
+        /* this implementation of the "deleted" option is a bit inefficient, as
+         * we re-insert the entry to be deleted into the replacement policy
+         * data structures, only to remove them again. Depending on how often
+         * we do this, we may want to optimize a bit.
+         *
+         * On the other hand, this implementation is reasonably clean, and
+         * makes good use of existing code.
+         *                                             JRM - 5/19/04
+         */
+        if ( deleted ) {
+
+            /* we can't delete a pinned entry */
+            HDassert ( ! (entry_ptr->is_pinned ) );
+
+            /* verify that the target entry is in the cache. */
+
+            H5C2__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+            if ( test_entry_ptr == NULL ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                            "entry not in hash table?!?.")
+            }
+            else if ( test_entry_ptr != entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                            "hash table contains multiple entries for addr?!?.")
+            }
+
+            /* clear + invalidate: evict without writing to disk */
+            if ( H5C2_flush_single_entry(cache_ptr->f,
+                                         dxpl_id,
+                                         cache_ptr,
+                                         type,
+                                         addr,
+                                         (H5C2__FLUSH_CLEAR_ONLY_FLAG |
+                                          H5C2__FLUSH_INVALIDATE_FLAG),
+                                         TRUE) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.")
+            }
+        }
+#ifdef H5_HAVE_PARALLEL
+        else if ( clear_entry ) {
+
+            /* verify that the target entry is in the cache. */
+
+            H5C2__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+            if ( test_entry_ptr == NULL ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                            "entry not in hash table?!?.")
+            }
+            else if ( test_entry_ptr != entry_ptr ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+                            "hash table contains multiple entries for addr?!?.")
+            }
+
+            /* mark clean without writing -- see PHDF5 note above */
+            if ( H5C2_flush_single_entry(cache_ptr->f,
+                                         dxpl_id,
+                                         cache_ptr,
+                                         type,
+                                         addr,
+                                         H5C2__FLUSH_CLEAR_ONLY_FLAG,
+                                         TRUE) < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.")
+            }
+        }
+#endif /* H5_HAVE_PARALLEL */
+    }
+
+    H5C2__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
+
+done:
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+    if ( H5C2_validate_lru_list(cache_ptr) < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "LRU sanity check failed.\n");
+    }
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_unprotect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_validate_resize_config()
+ *
+ * Purpose: Run a sanity check on the specified sections of the
+ * provided instance of struct H5C2_auto_size_ctl_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/23/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C2_validate_resize_config(H5C2_auto_size_ctl_t * config_ptr,
+                            unsigned int tests)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(H5C2_validate_resize_config, FAIL)
+
+    if ( config_ptr == NULL ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
+    }
+
+    if ( config_ptr->version != H5C2__CURR_AUTO_SIZE_CTL_VER ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version.")
+    }
+
+
+    /* general fields: cache size limits and epoch length */
+    if ( (tests & H5C2_RESIZE_CFG__VALIDATE_GENERAL) != 0 ) {
+
+        if ( ( config_ptr->set_initial_size != TRUE ) &&
+             ( config_ptr->set_initial_size != FALSE ) ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                        "set_initial_size must be either TRUE or FALSE");
+        }
+
+        if ( config_ptr->max_size > H5C2__MAX_MAX_CACHE_SIZE ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big");
+        }
+
+        if ( config_ptr->min_size < H5C2__MIN_MAX_CACHE_SIZE ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small");
+        }
+
+        if ( config_ptr->min_size > config_ptr->max_size ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size");
+        }
+
+        if ( ( config_ptr->set_initial_size ) &&
+             ( ( config_ptr->initial_size < config_ptr->min_size ) ||
+               ( config_ptr->initial_size > config_ptr->max_size ) ) ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                  "initial_size must be in the interval [min_size, max_size]");
+        }
+
+        if ( ( config_ptr->min_clean_fraction < 0.0 ) ||
+             ( config_ptr->min_clean_fraction > 1.0 ) ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                  "min_clean_fraction must be in the interval [0.0, 1.0]");
+        }
+
+        if ( config_ptr->epoch_length < H5C2__MIN_AR_EPOCH_LENGTH ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small");
+        }
+
+        if ( config_ptr->epoch_length > H5C2__MAX_AR_EPOCH_LENGTH ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big");
+        }
+    } /* H5C2_RESIZE_CFG__VALIDATE_GENERAL */
+
+
+    /* size increase fields */
+    if ( (tests & H5C2_RESIZE_CFG__VALIDATE_INCREMENT) != 0 ) {
+
+        if ( ( config_ptr->incr_mode != H5C2_incr__off ) &&
+             ( config_ptr->incr_mode != H5C2_incr__threshold ) ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode");
+        }
+
+        if ( config_ptr->incr_mode == H5C2_incr__threshold ) {
+
+            if ( ( config_ptr->lower_hr_threshold < 0.0 ) ||
+                 ( config_ptr->lower_hr_threshold > 1.0 ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                      "lower_hr_threshold must be in the range [0.0, 1.0]");
+            }
+
+            if ( config_ptr->increment < 1.0 ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "increment must be greater than or equal to 1.0");
+            }
+
+            if ( ( config_ptr->apply_max_increment != TRUE ) &&
+                 ( config_ptr->apply_max_increment != FALSE ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "apply_max_increment must be either TRUE or FALSE");
+            }
+
+            /* no need to check max_increment, as it is a size_t,
+             * and thus must be non-negative.
+             */
+        } /* H5C2_incr__threshold */
+
+    } /* H5C2_RESIZE_CFG__VALIDATE_INCREMENT */
+
+
+    /* size decrease fields */
+    if ( (tests & H5C2_RESIZE_CFG__VALIDATE_DECREMENT) != 0 ) {
+
+        if ( ( config_ptr->decr_mode != H5C2_decr__off ) &&
+             ( config_ptr->decr_mode != H5C2_decr__threshold ) &&
+             ( config_ptr->decr_mode != H5C2_decr__age_out ) &&
+             ( config_ptr->decr_mode != H5C2_decr__age_out_with_threshold )
+           ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode");
+        }
+
+        if ( config_ptr->decr_mode == H5C2_decr__threshold ) {
+
+            /* NOTE(review): only the upper bound is checked here, while the
+             * age_out_with_threshold case below checks both bounds --
+             * confirm the asymmetry is intentional.
+             */
+            if ( config_ptr->upper_hr_threshold > 1.0 ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "upper_hr_threshold must be <= 1.0");
+            }
+
+            if ( ( config_ptr->decrement > 1.0 ) ||
+                 ( config_ptr->decrement < 0.0 ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "decrement must be in the interval [0.0, 1.0]");
+            }
+
+            /* no need to check max_decrement as it is a size_t
+             * and thus must be non-negative.
+             */
+        } /* H5C2_decr__threshold */
+
+        if ( ( config_ptr->decr_mode == H5C2_decr__age_out ) ||
+             ( config_ptr->decr_mode == H5C2_decr__age_out_with_threshold )
+           ) {
+
+            if ( config_ptr->epochs_before_eviction < 1 ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "epochs_before_eviction must be positive");
+            }
+
+            if ( config_ptr->epochs_before_eviction > H5C2__MAX_EPOCH_MARKERS ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "epochs_before_eviction too big");
+            }
+
+            if ( ( config_ptr->apply_empty_reserve != TRUE ) &&
+                 ( config_ptr->apply_empty_reserve != FALSE ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                          "apply_empty_reserve must be either TRUE or FALSE");
+            }
+
+            if ( ( config_ptr->apply_empty_reserve ) &&
+                 ( ( config_ptr->empty_reserve > 1.0 ) ||
+                   ( config_ptr->empty_reserve < 0.0 ) ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                            "empty_reserve must be in the interval [0.0, 1.0]");
+            }
+
+            /* no need to check max_decrement as it is a size_t
+             * and thus must be non-negative.
+             */
+        } /* H5C2_decr__age_out || H5C2_decr__age_out_with_threshold */
+
+        if ( config_ptr->decr_mode == H5C2_decr__age_out_with_threshold ) {
+
+            if ( ( config_ptr->upper_hr_threshold > 1.0 ) ||
+                 ( config_ptr->upper_hr_threshold < 0.0 ) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                       "upper_hr_threshold must be in the interval [0.0, 1.0]");
+            }
+        } /* H5C2_decr__age_out_with_threshold */
+
+    } /* H5C2_RESIZE_CFG__VALIDATE_DECREMENT */
+
+
+    /* cross-field consistency: increase and decrease thresholds must not
+     * overlap, or the cache would thrash between growing and shrinking.
+     */
+    if ( (tests & H5C2_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0 ) {
+
+        if ( ( config_ptr->incr_mode == H5C2_incr__threshold )
+             &&
+             ( ( config_ptr->decr_mode == H5C2_decr__threshold )
+               ||
+               ( config_ptr->decr_mode == H5C2_decr__age_out_with_threshold )
+             )
+             &&
+             ( config_ptr->lower_hr_threshold
+               >=
+               config_ptr->upper_hr_threshold
+             )
+           ) {
+
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+                        "conflicting threshold fields in config.")
+        }
+    } /* H5C2_RESIZE_CFG__VALIDATE_INTERACTIONS */
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_validate_resize_config() */
+
+
+/*************************************************************************/
+/**************************** Private Functions: *************************/
+/*************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__auto_adjust_cache_size
+ *
+ * Purpose: Obtain the current full cache hit rate, and compare it
+ * with the hit rate thresholds for modifying cache size.
+ * If one of the thresholds has been crossed, adjusts the
+ * size of the cache accordingly.
+ *
+ * The function then resets the full cache hit rate
+ * statistics, and exits.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 10/7/04
+ *
+ * Modifications:
+ *
+ * JRM -- 11/18/04
+ * Major re-write to support ageout method of cache size
+ * reduction, and to adjust to changes in the
+ * H5C2_auto_size_ctl_t structure.
+ *
+ * JRM -- 9/8/07
+ *		Reworked to accommodate cache API changes needed to
+ * support metadata journaling. Mostly, this involved
+ * removing a bunch of parameters that used to be
+ * passed through to other calls, and are no longer
+ * needed.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__auto_adjust_cache_size(H5C2_t * cache_ptr,
+                             hid_t dxpl_id,
+                             hbool_t write_permitted)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+    herr_t result;
+    hbool_t inserted_epoch_marker = FALSE;
+    size_t new_max_cache_size = 0;
+    size_t old_max_cache_size = 0;
+    size_t new_min_clean_size = 0;
+    size_t old_min_clean_size = 0;
+    double hit_rate;
+    enum H5C2_resize_status status = in_spec2; /* will change if needed */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__auto_adjust_cache_size)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( cache_ptr->cache_accesses >=
+              (cache_ptr->resize_ctl).epoch_length );
+    HDassert( 0.0 <= (cache_ptr->resize_ctl).min_clean_fraction );
+    /* min_clean_fraction is a fraction in [0.0, 1.0] -- see
+     * H5C2_validate_resize_config().  The upper bound used to be 100.0,
+     * which made the assert vacuous for any validated config.
+     */
+    HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= 1.0 );
+
+    if ( !cache_ptr->resize_enabled ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled.")
+    }
+
+    HDassert( ( (cache_ptr->resize_ctl).incr_mode != H5C2_incr__off ) || \
+              ( (cache_ptr->resize_ctl).decr_mode != H5C2_decr__off ) );
+
+    if ( H5C2_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.")
+    }
+
+    HDassert( ( 0.0 <= hit_rate ) && ( hit_rate <= 1.0 ) );
+
+    /* first, see whether the size increase code calls for action */
+    switch ( (cache_ptr->resize_ctl).incr_mode )
+    {
+        case H5C2_incr__off:
+            if ( cache_ptr->size_increase_possible ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                           "size_increase_possible but H5C2_incr__off?!?!?")
+            }
+            break;
+
+        case H5C2_incr__threshold:
+            if ( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold ) {
+
+                if ( ! cache_ptr->size_increase_possible ) {
+
+                    status = increase_disabled2;
+
+                } else if ( cache_ptr->max_cache_size >=
+                            (cache_ptr->resize_ctl).max_size ) {
+
+                    HDassert( cache_ptr->max_cache_size == \
+                              (cache_ptr->resize_ctl).max_size );
+                    status = at_max_size2;
+
+                } else if ( ! cache_ptr->cache_full ) {
+
+                    status = not_full2;
+
+                } else {
+
+                    new_max_cache_size = (size_t)
+                                     (((double)(cache_ptr->max_cache_size)) *
+                                      (cache_ptr->resize_ctl).increment);
+
+                    /* clip to max size if necessary */
+                    if ( new_max_cache_size >
+                         (cache_ptr->resize_ctl).max_size ) {
+
+                        new_max_cache_size = (cache_ptr->resize_ctl).max_size;
+                    }
+
+                    /* clip to max increment if necessary */
+                    if ( ( (cache_ptr->resize_ctl).apply_max_increment ) &&
+                         ( (cache_ptr->max_cache_size +
+                            (cache_ptr->resize_ctl).max_increment) <
+                           new_max_cache_size ) ) {
+
+                        new_max_cache_size = cache_ptr->max_cache_size +
+                                         (cache_ptr->resize_ctl).max_increment;
+                    }
+
+                    status = increase2;
+                }
+            }
+            break;
+
+        default:
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.")
+    }
+
+    /* If the decr_mode is either age out or age out with threshold, we
+     * must run the marker maintenance code, whether we run the size
+     * reduction code or not.  We do this in two places -- here we
+     * insert a new marker if the number of active epoch markers is
+     * is less than the current epochs before eviction, and after
+     * the ageout call, we cycle the markers.
+     *
+     * However, we can't call the ageout code or cycle the markers
+     * unless there was a full complement of markers in place on
+     * entry.  The inserted_epoch_marker flag is used to track this.
+     */
+
+    if ( ( ( (cache_ptr->resize_ctl).decr_mode == H5C2_decr__age_out )
+           ||
+           ( (cache_ptr->resize_ctl).decr_mode ==
+              H5C2_decr__age_out_with_threshold
+           )
+         )
+         &&
+         ( cache_ptr->epoch_markers_active <
+           (cache_ptr->resize_ctl).epochs_before_eviction
+         )
+       ) {
+
+        result = H5C2__autoadjust__ageout__insert_new_marker(cache_ptr);
+
+        if ( result != SUCCEED ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "can't insert new epoch marker.")
+
+        } else {
+
+            inserted_epoch_marker = TRUE;
+        }
+    }
+
+    /* don't run the cache size decrease code unless the cache size
+     * increase code is disabled, or the size increase code sees no need
+     * for action.  In either case, status == in_spec2 at this point.
+     */
+
+    if ( status == in_spec2 ) {
+
+        switch ( (cache_ptr->resize_ctl).decr_mode )
+        {
+            case H5C2_decr__off:
+                break;
+
+            case H5C2_decr__threshold:
+                if ( hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold ) {
+
+                    if ( ! cache_ptr->size_decrease_possible ) {
+
+                        status = decrease_disabled2;
+
+                    } else if ( cache_ptr->max_cache_size <=
+                                (cache_ptr->resize_ctl).min_size ) {
+
+                        HDassert( cache_ptr->max_cache_size ==
+                                  (cache_ptr->resize_ctl).min_size );
+                        status = at_min_size2;
+
+                    } else {
+
+                        new_max_cache_size = (size_t)
+                                 (((double)(cache_ptr->max_cache_size)) *
+                                  (cache_ptr->resize_ctl).decrement);
+
+                        /* clip to min size if necessary */
+                        if ( new_max_cache_size <
+                             (cache_ptr->resize_ctl).min_size ) {
+
+                            new_max_cache_size =
+                                (cache_ptr->resize_ctl).min_size;
+                        }
+
+                        /* clip to max decrement if necessary */
+                        if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) &&
+                             ( ((cache_ptr->resize_ctl).max_decrement +
+                                new_max_cache_size) <
+                               cache_ptr->max_cache_size ) ) {
+
+                            new_max_cache_size = cache_ptr->max_cache_size -
+                                         (cache_ptr->resize_ctl).max_decrement;
+                        }
+
+                        status = decrease2;
+                    }
+                }
+                break;
+
+            case H5C2_decr__age_out_with_threshold:
+            case H5C2_decr__age_out:
+                if ( ! inserted_epoch_marker ) {
+
+                    if ( ! cache_ptr->size_decrease_possible ) {
+
+                        status = decrease_disabled2;
+
+                    } else {
+
+                        result = H5C2__autoadjust__ageout(cache_ptr,
+                                                          hit_rate,
+                                                          &status,
+                                                          &new_max_cache_size,
+                                                          dxpl_id,
+                                                          write_permitted);
+
+                        if ( result != SUCCEED ) {
+
+                            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                                        "ageout code failed.")
+                        }
+                    }
+                }
+                break;
+
+            default:
+                /* was "unknown incr_mode." -- copy/paste error; this is
+                 * the decr_mode switch.
+                 */
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown decr_mode.")
+        }
+    }
+
+    /* cycle the epoch markers here if appropriate */
+    if ( ( ( (cache_ptr->resize_ctl).decr_mode == H5C2_decr__age_out )
+           ||
+           ( (cache_ptr->resize_ctl).decr_mode ==
+              H5C2_decr__age_out_with_threshold
+           )
+         )
+         &&
+         ( ! inserted_epoch_marker )
+       ) {
+
+        /* move last epoch marker to the head of the LRU list */
+        result = H5C2__autoadjust__ageout__cycle_epoch_marker(cache_ptr);
+
+        if ( result != SUCCEED ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "error cycling epoch marker.")
+        }
+    }
+
+    /* apply the new max cache size, and derive the matching min clean
+     * size from min_clean_fraction.
+     */
+    if ( ( status == increase2 ) || ( status == decrease2 ) ) {
+
+        old_max_cache_size = cache_ptr->max_cache_size;
+        old_min_clean_size = cache_ptr->min_clean_size;
+
+        new_min_clean_size = (size_t)
+                             ((double)new_max_cache_size *
+                              ((cache_ptr->resize_ctl).min_clean_fraction));
+
+        /* new_min_clean_size is of size_t, and thus must be non-negative.
+         * Hence we have
+         *
+         * 	( 0 <= new_min_clean_size ).
+         *
+         * by definition.
+         */
+        HDassert( new_min_clean_size <= new_max_cache_size );
+        HDassert( (cache_ptr->resize_ctl).min_size <= new_max_cache_size );
+        HDassert( new_max_cache_size <= (cache_ptr->resize_ctl).max_size );
+
+        cache_ptr->max_cache_size = new_max_cache_size;
+        cache_ptr->min_clean_size = new_min_clean_size;
+
+        if ( status == increase2 ) {
+
+            cache_ptr->cache_full = FALSE;
+
+        } else if ( status == decrease2 ) {
+
+            cache_ptr->size_decreased = TRUE;
+        }
+    }
+
+    /* notify the caller-supplied report function, if any */
+    if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
+
+        (*((cache_ptr->resize_ctl).rpt_fcn))
+            (cache_ptr,
+             H5C2__CURR_AUTO_RESIZE_RPT_FCN_VER,
+             hit_rate,
+             status,
+             old_max_cache_size,
+             new_max_cache_size,
+             old_min_clean_size,
+             new_min_clean_size);
+    }
+
+    /* start hit rate collection afresh for the next epoch */
+    if ( H5C2_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+
+        /* this should be impossible... */
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "H5C2_reset_cache_hit_rate_stats failed.")
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__auto_adjust_cache_size() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout
+ *
+ * Purpose: Implement the ageout automatic cache size decrement
+ * algorithm. Note that while this code evicts aged out
+ * entries, the code does not change the maximum cache size.
+ * Instead, the function simply computes the new value (if
+ * any change is indicated) and reports this value in
+ * *new_max_cache_size_ptr.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 11/18/04
+ *
+ * Modifications:
+ *
+ * JRM -- 9/9/07
+ * Reworked function to support API changes in support of
+ *		metadata caching.  In essence, the change involved
+ * removal of arguments that are no longer needed by the
+ * callbacks, and thus no-longer need be passed through.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout(H5C2_t * cache_ptr,
+                        double hit_rate,
+                        enum H5C2_resize_status * status_ptr,
+                        size_t * new_max_cache_size_ptr,
+                        hid_t dxpl_id,
+                        hbool_t write_permitted)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    herr_t result;
+    size_t test_size;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    /* caller must pass in *status_ptr == in_spec2 and
+     * *new_max_cache_size_ptr == 0 -- both are pure out parameters here.
+     */
+    HDassert( ( status_ptr ) && ( *status_ptr == in_spec2 ) );
+    HDassert( ( new_max_cache_size_ptr ) && ( *new_max_cache_size_ptr == 0 ) );
+
+    /* remove excess epoch markers if any */
+    if ( cache_ptr->epoch_markers_active >
+         (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+        result = H5C2__autoadjust__ageout__remove_excess_markers(cache_ptr);
+
+        if ( result != SUCCEED ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "can't remove excess epoch markers.")
+        }
+    }
+
+    /* The ageout algorithm runs unconditionally in age_out mode, but in
+     * age_out_with_threshold mode it runs only when the hit rate is at
+     * or above the configured upper threshold.
+     */
+    if ( ( (cache_ptr->resize_ctl).decr_mode == H5C2_decr__age_out )
+         ||
+         ( ( (cache_ptr->resize_ctl).decr_mode ==
+              H5C2_decr__age_out_with_threshold
+            )
+           &&
+           ( hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold )
+         )
+       ) {
+
+        if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size ){
+
+            /* evict aged out cache entries if appropriate... */
+            result = H5C2__autoadjust__ageout__evict_aged_out_entries
+                     (
+                       dxpl_id,
+                       cache_ptr,
+                       write_permitted
+                     );
+
+            if ( result != SUCCEED ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "error flushing aged out entries.")
+            }
+
+            /* ... and then reduce cache size if appropriate */
+            if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
+
+                if ( (cache_ptr->resize_ctl).apply_empty_reserve ) {
+
+                    /* choose the max cache size such that the current
+                     * index size leaves empty_reserve (a fraction in
+                     * [0, 1)) of the cache free:
+                     *     test_size * (1 - empty_reserve) == index_size
+                     */
+                    test_size = (size_t)(((double)cache_ptr->index_size) /
+                                (1 - (cache_ptr->resize_ctl).empty_reserve));
+
+                    if ( test_size < cache_ptr->max_cache_size ) {
+
+                        *status_ptr = decrease2;
+                        *new_max_cache_size_ptr = test_size;
+                    }
+                } else {
+
+                    /* no empty reserve -- shrink straight down to the
+                     * current index size.
+                     */
+                    *status_ptr = decrease2;
+                    *new_max_cache_size_ptr = cache_ptr->index_size;
+                }
+
+                if ( *status_ptr == decrease2 ) {
+
+                    /* clip to min size if necessary */
+                    if ( *new_max_cache_size_ptr <
+                         (cache_ptr->resize_ctl).min_size ) {
+
+                        *new_max_cache_size_ptr =
+                            (cache_ptr->resize_ctl).min_size;
+                    }
+
+                    /* clip to max decrement if necessary -- i.e. never
+                     * propose shrinking by more than max_decrement in a
+                     * single invocation.
+                     */
+                    if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) &&
+                         ( ((cache_ptr->resize_ctl).max_decrement +
+                            *new_max_cache_size_ptr) <
+                           cache_ptr->max_cache_size ) ) {
+
+                        *new_max_cache_size_ptr = cache_ptr->max_cache_size -
+                                         (cache_ptr->resize_ctl).max_decrement;
+                    }
+                }
+            }
+        } else {
+
+            /* already at the configured floor -- nothing to shrink */
+            *status_ptr = at_min_size2;
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout__cycle_epoch_marker
+ *
+ * Purpose: Remove the oldest epoch marker from the LRU list,
+ * and reinsert it at the head of the LRU list. Also
+ * remove the epoch marker's index from the head of the
+ * ring buffer, and re-insert it at the tail of the ring
+ * buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout__cycle_epoch_marker(H5C2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    int i;                           /* index of the marker being cycled */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout__cycle_epoch_marker)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    if ( cache_ptr->epoch_markers_active <= 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "No active epoch markers on entry?!?!?.")
+    }
+
+    /* remove the last marker from both the ring buffer and the LRU list.
+     * The ring buffer holds marker indices in epoch order, oldest at
+     * "first" -- so dequeue from the head here.
+     */
+
+    i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
+
+    /* ring buffer has H5C2__MAX_EPOCH_MARKERS + 1 slots, hence the
+     * modulus below.
+     */
+    cache_ptr->epoch_marker_ringbuf_first =
+            (cache_ptr->epoch_marker_ringbuf_first + 1) %
+            (H5C2__MAX_EPOCH_MARKERS + 1);
+
+    cache_ptr->epoch_marker_ringbuf_size -= 1;
+
+    if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+    }
+
+    if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+    }
+
+    H5C2__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
+                     (cache_ptr)->LRU_head_ptr, \
+                     (cache_ptr)->LRU_tail_ptr, \
+                     (cache_ptr)->LRU_list_len, \
+                     (cache_ptr)->LRU_list_size, \
+                     (FAIL))
+
+    /* now, re-insert it at the head of the LRU list, and at the tail of
+     * the ring buffer.  Net effect: the marker now delimits the current
+     * epoch instead of the oldest one.
+     */
+
+    /* a marker's addr doubles as its index; a just-removed marker must
+     * be fully unlinked.
+     */
+    HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
+    HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
+    HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
+
+    cache_ptr->epoch_marker_ringbuf_last =
+        (cache_ptr->epoch_marker_ringbuf_last + 1) %
+        (H5C2__MAX_EPOCH_MARKERS + 1);
+
+    (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+
+    cache_ptr->epoch_marker_ringbuf_size += 1;
+
+    if ( cache_ptr->epoch_marker_ringbuf_size > H5C2__MAX_EPOCH_MARKERS ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
+    }
+
+    H5C2__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
+                      (cache_ptr)->LRU_head_ptr, \
+                      (cache_ptr)->LRU_tail_ptr, \
+                      (cache_ptr)->LRU_list_len, \
+                      (cache_ptr)->LRU_list_size, \
+                      (FAIL))
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout__cycle_epoch_marker() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout__evict_aged_out_entries
+ *
+ * Purpose: Evict clean entries in the cache that haven't
+ * been accessed for at least
+ * (cache_ptr->resize_ctl).epochs_before_eviction epochs,
+ * and flush dirty entries that haven't been accessed for
+ * that amount of time.
+ *
+ * Depending on configuration, the function will either
+ * flush or evict all such entries, or all such entries it
+ * encounters until it has freed the maximum amount of space
+ * allowed under the maximum decrement.
+ *
+ * If we are running in parallel mode, writes may not be
+ * permitted. If so, the function simply skips any dirty
+ * entries it may encounter.
+ *
+ * The function makes no attempt to maintain the minimum
+ * clean size, as there is no guarantee that the cache size
+ * will be changed.
+ *
+ * If there is no cache size change, the minimum clean size
+ * constraint will be met through a combination of clean
+ * entries and free space in the cache.
+ *
+ * If there is a cache size reduction, the minimum clean size
+ * will be re-calculated, and will be enforced the next time
+ * we have to make space in the cache.
+ *
+ *		Note: this function now takes a single dxpl_id
+ *		parameter.  The old primary_dxpl_id / secondary_dxpl_id
+ *		pair (used to distinguish the first write in a sequence
+ *		of writes from all subsequent writes) has been collapsed
+ *		into the one id as part of the API rework.
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ * Modifications:
+ *
+ * JRM -- 9/9/07
+ * Reworked function to support API changes in support of
+ * metadata cacheing. In essence, the change involved
+ * removal of arguments that are no longer needed by the
+ * callbacks, and thus no-longer need be passed through.
+ *
+ * JRM -- 10/13/07
+ *		Reworked code to allow the function to handle the
+ * case in which the LRU list is modified out from under the
+ * function by a serialize function. This can happen if
+ * the serialize function associated with the entry being
+ * flushed either accesses the next item in the LRU list,
+ * or (as Quincey assures me is impossible), it accesses
+ * an entry not currently in cache, causing the eviction
+ * of the next entry in the LRU.
+ *
+ * We handle this situation by detecting it, and restarting
+ * the scan of the LRU when it occurs.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout__evict_aged_out_entries(hid_t dxpl_id,
+                                                 H5C2_t * cache_ptr,
+                                                 hbool_t write_permitted)
+{
+    /* const char * fcn_name =
+	    "H5C2__autoadjust__ageout__evict_aged_out_entries()"; */
+    herr_t ret_value = SUCCEED;      /* Return value */
+    herr_t result;
+    size_t eviction_size_limit;      /* max bytes to evict this round */
+    size_t bytes_evicted = 0;        /* running total of evicted bytes */
+    hbool_t prev_is_dirty = FALSE;   /* snapshot used to detect LRU changes */
+    H5C2_cache_entry_t * entry_ptr;
+    H5C2_cache_entry_t * next_ptr;
+    H5C2_cache_entry_t * prev_ptr;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout__evict_aged_out_entries)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* if there is a limit on the amount that the cache size can be decrease
+     * in any one round of the cache size reduction algorithm, load that
+     * limit into eviction_size_limit.  Otherwise, set eviction_size_limit
+     * to the equivalent of infinity.  The current size of the index will
+     * do nicely.
+     */
+    if ( (cache_ptr->resize_ctl).apply_max_decrement ) {
+
+        eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
+
+    } else {
+
+        eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
+    }
+
+    if ( write_permitted ) {
+
+        /* scan the LRU from its tail (least recently used end) toward
+         * the head, stopping at the first epoch marker -- everything
+         * beyond it was accessed within the eviction window.
+         */
+        entry_ptr = cache_ptr->LRU_tail_ptr;
+
+        while ( ( entry_ptr != NULL ) &&
+                ( (entry_ptr->type)->id != H5C2__EPOCH_MARKER_TYPE ) &&
+                ( bytes_evicted < eviction_size_limit ) )
+        {
+            HDassert( ! (entry_ptr->is_protected) );
+
+            /* snapshot the neighbours (and prev's dirty bit) BEFORE the
+             * flush, so we can detect afterwards whether the serialize
+             * callback modified the LRU out from under us.
+             */
+            next_ptr = entry_ptr->next;
+            prev_ptr = entry_ptr->prev;
+
+            if ( prev_ptr != NULL ) {
+
+                prev_is_dirty = prev_ptr->is_dirty;
+            }
+
+            if ( entry_ptr->is_dirty ) {
+
+                /* dirty entry: flush in place (no invalidate).  Note
+                 * that flushed-but-not-evicted bytes do NOT count
+                 * against the eviction size limit.
+                 */
+                result = H5C2_flush_single_entry(cache_ptr->f,
+                                                 dxpl_id,
+                                                 cache_ptr,
+                                                 entry_ptr->type,
+                                                 entry_ptr->addr,
+                                                 H5C2__NO_FLAGS_SET,
+                                                 FALSE);
+            } else {
+
+                /* clean entry: evict it, and charge its size against
+                 * the eviction limit.
+                 */
+                bytes_evicted += entry_ptr->size;
+
+                result = H5C2_flush_single_entry(cache_ptr->f,
+                                                 dxpl_id,
+                                                 cache_ptr,
+                                                 entry_ptr->type,
+                                                 entry_ptr->addr,
+                                                 H5C2__FLUSH_INVALIDATE_FLAG,
+                                                 TRUE);
+            }
+
+            if ( result < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "unable to flush entry")
+            }
+
+            if ( prev_ptr != NULL ) {
+
+                if ( prev_ptr->magic != H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) {
+
+                    /* something horrible has happened to *prev_ptr --
+                     * scream and die.
+                     */
+                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                                "*prev_ptr corrupt")
+
+                } else if ( ( prev_ptr->is_dirty != prev_is_dirty )
+                            ||
+                            ( prev_ptr->next != next_ptr )
+                            ||
+                            ( prev_ptr->is_protected )
+                            ||
+                            ( prev_ptr->is_pinned ) ) {
+
+                    /* something has happened to the LRU -- start over
+                     * from the tail.
+                     */
+                    entry_ptr = cache_ptr->LRU_tail_ptr;
+
+                } else {
+
+                    /* LRU is unchanged -- continue the scan normally */
+                    entry_ptr = prev_ptr;
+
+                }
+            } else {
+
+                entry_ptr = NULL;
+
+            }
+        } /* end while */
+
+        /* for now at least, don't bother to maintain the minimum clean size,
+         * as the cache should now be less than its maximum size.  Due to
+         * the vagaries of the cache size reduction algorithm, we may not
+         * reduce the size of the cache.
+         *
+         * If we do, we will calculate a new minimum clean size, which will
+         * be enforced the next time we try to make space in the cache.
+         *
+         * If we don't, no action is necessary, as we have just evicted
+         * and/or flushed a bunch of entries and therefore the sum of the
+         * clean and free space in the cache must be greater than or equal
+         * to the min clean space requirement (assuming that requirement
+         * was met on entry).
+         */
+
+    } else /* ! write_permitted */ {
+
+        /* since we are not allowed to write, all we can do is evict
+         * any clean entries that we may encounter before we either
+         * hit the eviction size limit, or encounter the epoch marker.
+         *
+         * If we are operating read only, this isn't an issue, as there
+         * will not be any dirty entries.
+         *
+         * If we are operating in R/W mode, all the dirty entries we
+         * skip will be flushed the next time we attempt to make space
+         * when writes are permitted.  This may have some local
+         * performance implications, but it shouldn't cause any net
+         * slowdown.
+         */
+
+        HDassert( H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS );
+
+        entry_ptr = cache_ptr->LRU_tail_ptr;
+
+        while ( ( entry_ptr != NULL ) &&
+                ( (entry_ptr->type)->id != H5C2__EPOCH_MARKER_TYPE ) &&
+                ( bytes_evicted < eviction_size_limit ) )
+        {
+            HDassert( ! (entry_ptr->is_protected) );
+
+            prev_ptr = entry_ptr->prev;
+
+            if ( ! (entry_ptr->is_dirty) ) {
+
+                result = H5C2_flush_single_entry(cache_ptr->f,
+                                                 dxpl_id,
+                                                 cache_ptr,
+                                                 entry_ptr->type,
+                                                 entry_ptr->addr,
+                                                 H5C2__FLUSH_INVALIDATE_FLAG,
+                                                 TRUE);
+
+                if ( result < 0 ) {
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                "unable to flush clean entry")
+                }
+            }
+            /* just skip the entry if it is dirty, as we can't do
+             * anything with it now since we can't write.
+             *
+             * Since only clean entries are flushed here, serialize()
+             * will not be called, and thus we needn't test to see if
+             * the LRU has been changed out from under us.
+             */
+
+            entry_ptr = prev_ptr;
+
+        } /* end while */
+    }
+
+    /* if the index has dropped below the max cache size, the cache is
+     * no longer full.
+     */
+    if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
+
+        cache_ptr->cache_full = FALSE;
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout__evict_aged_out_entries() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout__insert_new_marker
+ *
+ * Purpose: Find an unused marker cache entry, mark it as used, and
+ * insert it at the head of the LRU list. Also add the
+ * marker's index in the epoch_markers array.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout__insert_new_marker(H5C2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    int i;                           /* index of the marker to activate */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout__insert_new_marker)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* refuse to insert another marker if we already have the full
+     * complement permitted by the current configuration.
+     */
+    if ( cache_ptr->epoch_markers_active >=
+         (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                    "Already have a full complement of markers.")
+    }
+
+    /* find an unused marker.  Note that the bounds check on i must be
+     * evaluated first -- with the tests in the opposite order, the loop
+     * would read epoch_marker_active[H5C2__MAX_EPOCH_MARKERS], one
+     * element past the end of the array, whenever all markers were in
+     * use.  && short-circuits, so this ordering is safe.
+     */
+    i = 0;
+    while ( ( i < H5C2__MAX_EPOCH_MARKERS ) &&
+            ( (cache_ptr->epoch_marker_active)[i] ) )
+    {
+        i++;
+    }
+
+    if ( i >= H5C2__MAX_EPOCH_MARKERS ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker.")
+    }
+
+    /* loop exit with i in range guarantees slot i is free */
+    HDassert( ! (cache_ptr->epoch_marker_active)[i] );
+
+    /* an unused marker must be fully unlinked from the LRU list, and its
+     * addr doubles as its index.
+     */
+    HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
+    HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
+    HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
+
+    (cache_ptr->epoch_marker_active)[i] = TRUE;
+
+    /* append the marker's index at the tail of the ring buffer (which
+     * has H5C2__MAX_EPOCH_MARKERS + 1 slots, hence the modulus)...
+     */
+    cache_ptr->epoch_marker_ringbuf_last =
+        (cache_ptr->epoch_marker_ringbuf_last + 1) %
+        (H5C2__MAX_EPOCH_MARKERS + 1);
+
+    (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+
+    cache_ptr->epoch_marker_ringbuf_size += 1;
+
+    if ( cache_ptr->epoch_marker_ringbuf_size > H5C2__MAX_EPOCH_MARKERS ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
+    }
+
+    /* ... and insert the marker entry itself at the head of the LRU list */
+    H5C2__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
+                      (cache_ptr)->LRU_head_ptr, \
+                      (cache_ptr)->LRU_tail_ptr, \
+                      (cache_ptr)->LRU_list_len, \
+                      (cache_ptr)->LRU_list_size, \
+                      (FAIL))
+
+    cache_ptr->epoch_markers_active += 1;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout__insert_new_marker() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout__remove_all_markers
+ *
+ * Purpose: Remove all epoch markers from the LRU list and mark them
+ * as inactive.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout__remove_all_markers(H5C2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    int marker_idx;                  /* index of the marker being retired */
+    int head_idx;                    /* current head slot of the ring buffer */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout__remove_all_markers)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* Retire markers one at a time -- dequeue the oldest from the ring
+     * buffer, unlink it from the LRU list, and flag it unused -- until
+     * no active markers remain.
+     */
+    while ( cache_ptr->epoch_markers_active > 0 )
+    {
+        /* dequeue the index of the oldest epoch marker from the head
+         * of the ring buffer.
+         */
+        head_idx = cache_ptr->epoch_marker_ringbuf_first;
+        marker_idx = (cache_ptr->epoch_marker_ringbuf)[head_idx];
+
+        cache_ptr->epoch_marker_ringbuf_first =
+            (cache_ptr->epoch_marker_ringbuf_first + 1) %
+            (H5C2__MAX_EPOCH_MARKERS + 1);
+
+        cache_ptr->epoch_marker_ringbuf_size -= 1;
+
+        if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+        }
+
+        if ( (cache_ptr->epoch_marker_active)[marker_idx] != TRUE ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+        }
+
+        /* unlink the marker from the LRU list */
+        H5C2__DLL_REMOVE((&((cache_ptr->epoch_markers)[marker_idx])), \
+                         (cache_ptr)->LRU_head_ptr, \
+                         (cache_ptr)->LRU_tail_ptr, \
+                         (cache_ptr)->LRU_list_len, \
+                         (cache_ptr)->LRU_list_size, \
+                         (FAIL))
+
+        /* flag the marker as available for re-use */
+        (cache_ptr->epoch_marker_active)[marker_idx] = FALSE;
+
+        /* a retired marker must be fully unlinked, and its addr doubles
+         * as its index.
+         */
+        HDassert( ((cache_ptr->epoch_markers)[marker_idx]).addr == \
+                  (haddr_t)marker_idx );
+        HDassert( ((cache_ptr->epoch_markers)[marker_idx]).next == NULL );
+        HDassert( ((cache_ptr->epoch_markers)[marker_idx]).prev == NULL );
+
+        /* one fewer active marker now */
+        cache_ptr->epoch_markers_active -= 1;
+
+        HDassert( cache_ptr->epoch_markers_active == \
+                  cache_ptr->epoch_marker_ringbuf_size );
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout__remove_all_markers() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2__autoadjust__ageout__remove_excess_markers
+ *
+ * Purpose: Remove epoch markers from the end of the LRU list and
+ * mark them as inactive until the number of active markers
+ * equals the the current value of
+ * (cache_ptr->resize_ctl).epochs_before_eviction.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2__autoadjust__ageout__remove_excess_markers(H5C2_t * cache_ptr)
+{
+    herr_t ret_value = SUCCEED;      /* Return value */
+    int idx;                         /* index of the marker being retired */
+    int rb_first;                    /* head slot of the ring buffer */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2__autoadjust__ageout__remove_excess_markers)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* it is an error to call this function unless there really are
+     * more active markers than epochs_before_eviction allows.
+     */
+    if ( cache_ptr->epoch_markers_active <=
+         (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry.")
+    }
+
+    /* retire the oldest markers one at a time until the active count
+     * drops to epochs_before_eviction.
+     */
+    while ( cache_ptr->epoch_markers_active >
+            (cache_ptr->resize_ctl).epochs_before_eviction )
+    {
+        /* dequeue the index of the oldest epoch marker from the head
+         * of the ring buffer.
+         */
+        rb_first = cache_ptr->epoch_marker_ringbuf_first;
+        idx = (cache_ptr->epoch_marker_ringbuf)[rb_first];
+
+        cache_ptr->epoch_marker_ringbuf_first =
+            (cache_ptr->epoch_marker_ringbuf_first + 1) %
+            (H5C2__MAX_EPOCH_MARKERS + 1);
+
+        cache_ptr->epoch_marker_ringbuf_size -= 1;
+
+        if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+        }
+
+        if ( (cache_ptr->epoch_marker_active)[idx] != TRUE ) {
+
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+        }
+
+        /* unlink the marker from the LRU list */
+        H5C2__DLL_REMOVE((&((cache_ptr->epoch_markers)[idx])), \
+                         (cache_ptr)->LRU_head_ptr, \
+                         (cache_ptr)->LRU_tail_ptr, \
+                         (cache_ptr)->LRU_list_len, \
+                         (cache_ptr)->LRU_list_size, \
+                         (FAIL))
+
+        /* flag the marker as available for re-use */
+        (cache_ptr->epoch_marker_active)[idx] = FALSE;
+
+        /* a retired marker must be fully unlinked, and its addr doubles
+         * as its index.
+         */
+        HDassert( ((cache_ptr->epoch_markers)[idx]).addr == (haddr_t)idx );
+        HDassert( ((cache_ptr->epoch_markers)[idx]).next == NULL );
+        HDassert( ((cache_ptr->epoch_markers)[idx]).prev == NULL );
+
+        /* one fewer active marker now */
+        cache_ptr->epoch_markers_active -= 1;
+
+        HDassert( cache_ptr->epoch_markers_active == \
+                  cache_ptr->epoch_marker_ringbuf_size );
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2__autoadjust__ageout__remove_excess_markers() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C2_flush_invalidate_cache
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ *		reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the flush (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id).
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 3/24/065
+ *
+ * Modifications:
+ *
+ * To support the fractal heap, the cache must now deal with
+ * entries being dirtied, resized, and/or renamed inside
+ * flush callbacks. Updated function to support this.
+ *
+ * -- JRM 8/27/06
+ *
+ * Reworked argument list and code to reflect the
+ * removal of the secondary dxpl id, and the decision
+ * to store f in H5C2_t, removing the need to pass it
+ * in all the time.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C2_flush_invalidate_cache(hid_t dxpl_id,
+ H5C2_t * cache_ptr,
+ unsigned flags)
+{
+ /* const char * fcn_name = "H5C2_flush_invalidate_cache()"; */
+ herr_t status;
+ herr_t ret_value = SUCCEED;
+ hbool_t done = FALSE;
+ int32_t protected_entries = 0;
+ int32_t i;
+ int32_t cur_pel_len;
+ int32_t old_pel_len;
+ int32_t passes = 0;
+ unsigned cooked_flags;
+ H5SL_node_t * node_ptr = NULL;
+ H5C2_cache_entry_t * entry_ptr = NULL;
+ H5C2_cache_entry_t * next_entry_ptr = NULL;
+#if H5C2_DO_SANITY_CHECKS
+ int64_t actual_slist_len = 0;
+ int64_t initial_slist_len = 0;
+ size_t actual_slist_size = 0;
+ size_t initial_slist_size = 0;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ FUNC_ENTER_NOAPI(H5C2_flush_invalidate_cache, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || cache_ptr->f );
+ HDassert( cache_ptr->slist_ptr );
+
+ /* Filter out the flags that are not relevant to the flush/invalidate.
+ * At present, only the H5C2__FLUSH_CLEAR_ONLY_FLAG is kept.
+ */
+ cooked_flags = flags & H5C2__FLUSH_CLEAR_ONLY_FLAG;
+
+ /* remove ageout markers if present */
+ if ( cache_ptr->epoch_markers_active > 0 ) {
+
+ status = H5C2__autoadjust__ageout__remove_all_markers(cache_ptr);
+
+ if ( status != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "error removing all epoch markers.")
+ }
+ }
+
+    /* The flush procedure here is a bit strange.
+     *
+     * In the outer while loop we make at least one pass through the
+     * cache, and then repeat until either all the pinned entries
+     * unpin themselves, or until the number of pinned entries stops
+     * declining.  In this latter case, we scream and die.
+     *
+     * Since the fractal heap can dirty, resize, and/or rename entries
+     * in its flush callback, it is possible that the cache will still
+     * contain dirty entries at this point.  If so, we must make up to
+     * H5C2__MAX_PASSES_ON_FLUSH more passes through the skip list
+     * to allow it to empty.  If it is not empty at this point, we again
+     * scream and die.
+ *
+ * Further, since clean entries can be dirtied, resized, and/or renamed
+ * as the result of a flush call back (either the entries own, or that
+ * for some other cache entry), we can no longer promise to flush
+ * the cache entries in increasing address order.
+ *
+ * Instead, we just do the best we can -- making a pass through
+ * the skip list, and then a pass through the "clean" entries, and
+ * then repeating as needed. Thus it is quite possible that an
+ * entry will be evicted from the cache only to be re-loaded later
+ * in the flush process (From what Quincey tells me, the pin
+ * mechanism makes this impossible, but even it it is true now,
+ * we shouldn't count on it in the future.)
+ *
+ * The bottom line is that entries will probably be flushed in close
+ * to increasing address order, but there are no guarantees.
+ */
+
+ cur_pel_len = cache_ptr->pel_len;
+ old_pel_len = cache_ptr->pel_len;
+
+ while ( ! done )
+ {
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
+
+ if ( cache_ptr->slist_len == 0 ) {
+
+ node_ptr = NULL;
+ next_entry_ptr = NULL;
+ HDassert( cache_ptr->slist_size == 0 );
+
+ } else {
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if ( node_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "slist_len != 0 && node_ptr == NULL");
+ }
+
+ next_entry_ptr = (H5C2_cache_entry_t *)H5SL_item(node_ptr);
+
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 1 ?!?!");
+ }
+
+ HDassert( next_entry_ptr->magic == H5C2__H5C2_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
+
+ }
+
+#if H5C2_DO_SANITY_CHECKS
+ /* Depending on circumstances, H5C2_flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, and/or renamed as the result of
+ * calls to the flush callbacks. We use the slist_len_increase
+ * and slist_size_increase increase fields in struct H5C2_t
+ * to track these changes for purpose of sanity checking.
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* Finally, reset the actual_slist_len and actual_slist_size
+ * fields to zero, as these fields are used to accumulate
+ * the slist lenght and size that we see as we scan through
+ * the slist.
+ */
+ actual_slist_len = 0;
+ actual_slist_size = 0;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ while ( node_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, it is possible
+ * that the serialize callback will dirty and/or resize
+ * other entries in the cache. In particular, while
+ * Quincey has promised me that this will never happen,
+ * it is possible that the serialize callback for an
+ * entry may protect an entry that is not in the cache,
+ * perhaps causing the cache to flush and possibly
+ * evict the entry associated with node_ptr to make
+ * space for the new entry.
+ *
+ * Thus we do a bit of extra sanity checking on entry_ptr,
+ * and break out of this scan of the skip list if we
+ * detect major problems. We have a bit of leaway on the
+ * number of passes though the skip list, so this shouldn't
+ * be an issue in the flush in and of itself, as it should
+ * be all but impossible for this to happen more than once
+ * in any flush.
+ *
+ * Observe that that breaking out of the scan early
+ * shouldn't break the sanity checks just after the end
+ * of this while loop.
+ *
+ * If an entry has merely been marked clean and removed from
+ * the s-list, we simply break out of the scan.
+ *
+ * If the entry has been evicted, we flag an error and
+ * exit.
+ */
+
+ if ( entry_ptr->magic != H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry_ptr->magic is invalid ?!?!");
+
+ } else if ( ( ! entry_ptr->is_dirty ) ||
+ ( ! entry_ptr->in_slist ) ) {
+
+ /* the s-list has been modified out from under us.
+ * break out of the loop.
+ */
+ break;
+ }
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+
+ if ( node_ptr != NULL ) {
+
+ next_entry_ptr = (H5C2_cache_entry_t *)H5SL_item(node_ptr);
+
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 2 ?!?!");
+ }
+
+ HDassert( next_entry_ptr->magic ==
+ H5C2__H5C2_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
+
+ } else {
+
+ next_entry_ptr = NULL;
+ }
+
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush, we must keep the slist in cannonical form at all
+ * times.
+ */
+
+ HDassert( entry_ptr != NULL );
+ HDassert( entry_ptr->in_slist );
+
+#if H5C2_DO_SANITY_CHECKS
+ /* update actual_slist_len & actual_slist_size before
+ * the flush. Note that the entry will be removed
+ * from the slist after the flush, and thus may be
+ * resized by the flush callback. This is OK, as
+ * we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ * Note that we include pinned entries in this count, even
+ * though we will not actually flush them.
+ */
+ actual_slist_len++;
+ actual_slist_size += entry_ptr->size;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ if ( entry_ptr->is_protected ) {
+
+ /* we have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
+
+ } else if ( entry_ptr->is_pinned ) {
+
+ /* Test to see if we are can flush the entry now.
+ * If we can, go ahead and flush, but don't tell
+ * H5C2_flush_single_entry() to destroy the entry
+ * as pinned entries can't be evicted.
+ */
+ if ( TRUE ) { /* When we get to multithreaded cache,
+ * we will need either locking code, and/or
+ * a test to see if the entry is in flushable
+ * condition here.
+ */
+
+ status = H5C2_flush_single_entry(cache_ptr->f,
+ dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ H5C2__NO_FLAGS_SET,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are toast
+ * so just scream and die.
+ */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty pinned entry flush failed.")
+ }
+ }
+ } else {
+
+ status = H5C2_flush_single_entry(cache_ptr->f,
+ dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ (cooked_flags |
+ H5C2__FLUSH_INVALIDATE_FLAG),
+ TRUE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty entry flush destroy failed.")
+ }
+ }
+
+ } /* end while loop scanning skip list */
+
+#if H5C2_DO_SANITY_CHECKS
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
+
+ if ( node_ptr == NULL ) {
+
+ HDassert( (actual_slist_len + cache_ptr->slist_len) ==
+ (initial_slist_len + cache_ptr->slist_len_increase) );
+ HDassert( (actual_slist_size + cache_ptr->slist_size) ==
+ (initial_slist_size + cache_ptr->slist_size_increase) );
+ }
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush - destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * of increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
+ for ( i = 0; i < H5C2__HASH_TABLE_LEN; i++ )
+ {
+ next_entry_ptr = cache_ptr->index[i];
+
+ while ( next_entry_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
+
+ HDassert( entry_ptr->magic == H5C2__H5C2_CACHE_ENTRY_T_MAGIC );
+
+ next_entry_ptr = entry_ptr->ht_next;
+
+ HDassert ( ( next_entry_ptr == NULL ) ||
+ ( next_entry_ptr->magic ==
+ H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) );
+
+ if ( entry_ptr->is_protected ) {
+
+ /* we have major problems -- but lets flush and destroy
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
+
+ if ( ! entry_ptr->in_slist ) {
+
+ HDassert( !(entry_ptr->is_dirty) );
+ }
+ } else if ( ! ( entry_ptr->is_pinned ) ) {
+
+ status =
+ H5C2_flush_single_entry(cache_ptr->f,
+ dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ (cooked_flags |
+ H5C2__FLUSH_INVALIDATE_FLAG),
+ TRUE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Entry flush destroy failed.")
+ }
+ }
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
+ * result of destroys of entries that reference it.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
+ */
+
+             /* if the serialize function on the entry we last evicted
+              * loaded an entry into cache (as Quincey has promised me
+              * it never will), and if the cache was full, it is
+              * possible that *next_entry_ptr was flushed or evicted.
+              *
+              * Test to see if this happened here, and set next_entry_ptr
+              * to NULL if it did.  Note that if this test is triggered,
+              * we are accessing a deallocated piece of dynamically
+              * allocated memory, so we just scream and die.
+              */
+ if ( ( next_entry_ptr != NULL ) &&
+ ( next_entry_ptr->magic !=
+ H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) ) {
+
+ /* Something horrible has happened to
+ * *next_entry_ptr -- scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr->magic is invalid?!?!?.")
+ }
+ } /* end while loop scanning hash table bin */
+ } /* end for loop scanning hash table */
+
+ old_pel_len = cur_pel_len;
+ cur_pel_len = cache_ptr->pel_len;
+
+ if ( ( cur_pel_len > 0 ) && ( cur_pel_len >= old_pel_len ) ) {
+
+ /* The number of pinned entries is positive, and it is not
+ * declining. Scream and die.
+ */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't unpin all pinned entries 1.")
+
+ } else if ( ( cur_pel_len == 0 ) && ( old_pel_len == 0 ) ) {
+
+ /* increment the pass count */
+ passes++;
+ }
+
+ if ( passes >= H5C2__MAX_PASSES_ON_FLUSH ) {
+
+ /* we have exceeded the maximum number of passes through the
+ * cache to flush and destroy all entries. Scream and die.
+ */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Maximum passes on flush exceeded.")
+ }
+
+ if ( cache_ptr->index_len <= 0 ) {
+
+ done = TRUE;
+ HDassert( cache_ptr->index_size == 0 );
+ HDassert( cache_ptr->slist_len == 0 );
+ HDassert( cache_ptr->slist_size == 0 );
+ HDassert( cache_ptr->pel_len == 0 );
+ HDassert( cache_ptr->pel_size == 0 );
+ HDassert( cache_ptr->pl_len == 0 );
+ HDassert( cache_ptr->pl_size == 0 );
+ HDassert( cache_ptr->LRU_list_len == 0 );
+ HDassert( cache_ptr->LRU_list_size == 0 );
+ }
+
+ } /* main while loop */
+
+ HDassert( protected_entries <= cache_ptr->pl_len );
+
+ if ( protected_entries > 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Cache has protected entries.")
+
+ } else if ( cur_pel_len > 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't unpin all pinned entries 2.")
+
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_flush_invalidate_cache() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_flush_single_entry
+ *
+ * Purpose: Flush or clear (and evict if requested) the cache entry
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
+ *
+ * Attempts to flush a protected entry will result in an
+ * error.
+ *
+ * *first_flush_ptr should be true if only one
+ * flush is contemplated before the next load, or if this
+ * is the first of a sequence of flushes that will be
+ * completed before the next load. *first_flush_ptr is set
+ * to false if a flush actually takes place, and should be
+ * left false until the end of the sequence.
+ *
+ * The primary_dxpl_id is used if *first_flush_ptr is TRUE
+ * on entry, and a flush actually takes place. The
+ * secondary_dxpl_id is used in any subsequent flush where
+ * *first_flush_ptr is FALSE on entry.
+ *
+ *		If the H5C2__FLUSH_INVALIDATE_FLAG flag is set, the entry will
+ *		be cleared and not flushed -- in which case *first_flush_ptr,
+ *		primary_dxpl_id, and secondary_dxpl_id are all irrelevant,
+ *		and the call can't be part of a sequence of flushes.
+ *
+ * If the caller knows the address of the TBBT node at
+ * which the target entry resides, it can avoid a lookup
+ * by supplying that address in the tgt_node_ptr parameter.
+ * If this parameter is NULL, the function will do a TBBT
+ * search for the entry instead.
+ *
+ * The function does nothing silently if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 5/5/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * QAK -- 11/26/04
+ * Updated function for the switch from TBBTs to skip lists.
+ *
+ * JRM -- 1/6/05
+ * Updated function to reset the flush_marker field.
+ * Also replace references to H5F_FLUSH_INVALIDATE and
+ * H5F_FLUSH_CLEAR_ONLY with references to
+ * H5C2__FLUSH_INVALIDATE_FLAG and H5C2__FLUSH_CLEAR_ONLY_FLAG
+ * respectively.
+ *
+ * JRM -- 6/24/05
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
+ * completely disabled.
+ *
+ * JRM -- 7/5/05
+ * Added code to call the new log_flush callback whenever
+ * a dirty entry is written to disk. Note that the callback
+ * is not called if the H5C2__FLUSH_CLEAR_ONLY_FLAG is set,
+ * as there is no write to file in this case.
+ *
+ * JRM -- 8/21/06
+ * Added code maintaining the flush_in_progress and
+ * destroy_in_progress fields in H5C2_cache_entry_t.
+ *
+ * Also added flush_flags parameter to the call to
+ * type_ptr->flush() so that the flush routine can report
+ * whether the entry has been resized or renamed. Added
+ * code using the flush_flags variable to detect the case
+ * in which the target entry is resized during flush, and
+ * update the caches data structures accordingly.
+ *
+ *
+ * JRM -- 3/29/07
+ * Added sanity checks on the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 6/25/07
+ * Rewrite of function to use the new metadata cache callback
+ * functions. These functions move all metadata file I/O into
+ * the cache proper, which is necessary for metadata journaling.
+ *
+ * To date, the functions of the H5C2__FLUSH_INVALIDATE_FLAG
+ * and H5C2__FLUSH_CLEAR_ONLY_FLAG have not been documented
+ * in H5C2, as these flags were just passed through to the
+ * client callbacks. As much of the callback functionality
+ * is now in the cache, the function of these flags should
+ * be documented explicitly here in H5C2.
+ *
+ *		If the H5C2__FLUSH_INVALIDATE_FLAG is set, the entry is to
+ *		be written to disk if dirty, and then evicted from the
+ *		cache and discarded.  As an optimization, the destroyed
+ *		entry is deleted from the slist only on request.
+ *
+ * If the H5C2__FLUSH_CLEAR_ONLY_FLAG is set, the entry is
+ * to be marked clean if it is dirty. Under no circumstances
+ * will it be written to disk.
+ *
+ * If both the H5C2__FLUSH_INVALIDATE_FLAG and the
+ * H5C2__FLUSH_CLEAR_ONLY_FLAG are set, the entry is marked
+ * clean and then evicted from the cache without writing
+ * to disk. If dirty, the entry is removed from the slist
+ * or not as requested.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2_flush_single_entry(H5F_t * f,
+                        hid_t dxpl_id,
+                        H5C2_t * cache_ptr,
+                        const H5C2_class_t * type_ptr,
+                        haddr_t addr,
+                        unsigned flags,
+                        hbool_t del_entry_from_slist_on_destroy)
+{
+    /* const char * fcn_name = "H5C2_flush_single_entry()"; */
+    hbool_t		destroy;	/* evict the entry when done? */
+    hbool_t		clear_only;	/* mark clean -- never write to disk */
+    hbool_t		was_dirty;	/* dirty state on entry -- for log_flush */
+    herr_t		ret_value = SUCCEED;      /* Return value */
+    herr_t		status;
+    int			type_id;
+    unsigned		serialize_flags = 0;
+    /* The following three fields are out parameters of the serialize
+     * callback.  They are only meaningful when the callback sets the
+     * corresponding bits in serialize_flags -- initialize them anyway
+     * so we never read indeterminate values should a buggy callback
+     * set a flag without setting the matching field.
+     */
+    haddr_t		new_addr = HADDR_UNDEF;
+    size_t		new_len = 0;
+    void *		new_image_ptr = NULL;
+    H5C2_cache_entry_t *	entry_ptr = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2_flush_single_entry)
+
+    HDassert( f );
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( type_ptr );
+    HDassert( H5F_addr_defined(addr) );
+
+    destroy = ( (flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 );
+    clear_only = ( (flags & H5C2__FLUSH_CLEAR_ONLY_FLAG) != 0);
+
+    /* attempt to find the target entry in the hash table */
+    H5C2__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+#if H5C2_DO_SANITY_CHECKS
+    if ( entry_ptr != NULL ) {
+
+        HDassert( ! ( ( destroy ) && ( entry_ptr->is_pinned ) ) );
+
+        if ( entry_ptr->in_slist ) {
+
+            /* entries in the slist must be dirty if marked for flush,
+             * and must reside at the address under which they are indexed.
+             */
+            if ( ( ( entry_ptr->flush_marker ) && ( ! entry_ptr->is_dirty ) )
+                 ||
+                 ( entry_ptr->addr != addr ) ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "entry in slist failed sanity checks.")
+            }
+        } else {
+
+            /* entries not in the slist must be clean and unmarked */
+            if ( ( entry_ptr->is_dirty ) ||
+                 ( entry_ptr->flush_marker ) ||
+                 ( entry_ptr->addr != addr ) ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "entry failed sanity checks.")
+            }
+        }
+    }
+#if 1
+    /* this should be useful for debugging from time to time.
+     * lets leave it in for now.  -- JRM 12/15/04
+     */
+    else {
+        HDfprintf(stdout,
+                  "H5C2_flush_single_entry(): non-existent entry. addr = 0x%lx\n",
+                  (long)addr);
+        HDfflush(stdout);
+    }
+#endif
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+    if ( ( entry_ptr != NULL ) && ( entry_ptr->is_protected ) )
+    {
+        /* Attempt to flush a protected entry -- scream and die. */
+        HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \
+                    "Attempt to flush a protected entry.")
+    }
+
+    if ( ( entry_ptr != NULL ) &&
+         ( ( type_ptr == NULL ) || ( type_ptr->id == entry_ptr->type->id ) ) )
+    {
+        /* we have work to do */
+
+        /* We will set flush_in_progress back to FALSE at the end if the
+         * entry still exists at that point.
+         */
+        entry_ptr->flush_in_progress = TRUE;
+
+#ifdef H5_HAVE_PARALLEL
+#ifndef NDEBUG
+
+        /* If MPI based VFD is used, do special parallel I/O sanity checks.
+         * Note that we only do these sanity checks when the clear_only flag
+         * is not set, and the entry to be flushed is dirty.  Don't bother
+         * otherwise as no file I/O can result.
+         */
+        if ( ( ! clear_only ) &&
+             ( entry_ptr->is_dirty ) &&
+             ( IS_H5FD_MPI(f) ) ) {
+
+            H5P_genplist_t *dxpl;           /* Dataset transfer property list */
+            H5FD_mpio_xfer_t xfer_mode;     /* I/O xfer mode property value */
+
+            /* Get the dataset transfer property list */
+            if ( NULL == (dxpl = H5I_object(dxpl_id)) ) {
+
+                HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \
+                            "not a dataset creation property list")
+            }
+
+            /* Get the transfer mode property */
+            if( H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0 ) {
+
+                HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, \
+                            "can't retrieve xfer mode")
+            }
+
+            /* Sanity check transfer mode */
+            /* I'm surprised that this sanity check is working at
+             * present -- will need to look into it at some point.
+             *
+             *                                 JRM -- 7/7/07
+             */
+
+            HDassert( xfer_mode == H5FD_MPIO_COLLECTIVE );
+        }
+
+#endif /* NDEBUG */
+#endif /* H5_HAVE_PARALLEL */
+
+        /* snapshot dirty state and type id now -- both are needed for the
+         * log_flush callback at the end, and the entry may have been
+         * destroyed by then.
+         */
+        was_dirty = entry_ptr->is_dirty;
+        type_id = entry_ptr->type->id;
+
+        entry_ptr->flush_marker = FALSE;
+
+        if ( clear_only ) {
+            H5C2__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+        } else {
+            H5C2__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+        }
+
+        if ( destroy ) {
+            H5C2__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+        }
+
+        /* Always remove the entry from the hash table on a destroy.  On a
+         * flush with destroy, it is cheaper to discard the skip list all at
+         * once rather than remove the entries one by one, so we only delete
+         * from the slist only if requested.
+         *
+         * Note that it is possible that the entry will be renamed during
+         * its call to flush.  This will upset H5C2_rename_entry() if we
+         * don't tell it that it doesn't have to worry about updating the
+         * index and SLIST.  Use the destroy_in_progress field for this
+         * purpose.
+         */
+        if ( destroy ) {
+
+            entry_ptr->destroy_in_progress = TRUE;
+
+            H5C2__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+
+            if ( ( entry_ptr->in_slist ) &&
+                 ( del_entry_from_slist_on_destroy ) ) {
+
+                H5C2__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+            }
+        }
+
+        /* Update the replacement policy for the flush or eviction. */
+        if ( destroy ) { /* AKA eviction */
+
+#if 0 /* JRM */
+            /* This test code may come in handy -- lets keep it for a while.
+             *
+             * Note that it will cause spurious errors in the serial case
+             * unless we are maintaining the clean and dirty LRU lists.
+             */
+            {
+                if ( entry_ptr->is_dirty )
+                {
+                    if ( cache_ptr->dLRU_head_ptr == NULL )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->dLRU_head_ptr == NULL.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->dLRU_tail_ptr == NULL )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->dLRU_tail_ptr == NULL.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->dLRU_list_len <= 0 )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->dLRU_list_len <= 0.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->dLRU_list_size <= 0 )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->dLRU_list_size <= 0.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->dLRU_list_size < entry_ptr->size )
+                        HDfprintf(stdout,
+                              "%s: cache_ptr->dLRU_list_size < entry_ptr->size.\n",
+                              fcn_name);
+
+                    if ( ( (cache_ptr->dLRU_list_size) == entry_ptr->size ) &&
+                         ( ! ( (cache_ptr->dLRU_list_len) == 1 ) ) )
+                        HDfprintf(stdout,
+                              "%s: dLRU_list_size == size && dLRU_list_len != 1\n",
+                              fcn_name);
+
+                    if ( ( entry_ptr->aux_prev == NULL ) &&
+                         ( cache_ptr->dLRU_head_ptr != entry_ptr ) )
+                        HDfprintf(stdout,
+                  "%s: entry_ptr->aux_prev == NULL && dLRU_head_ptr != entry_ptr\n",
+                  fcn_name);
+
+                    if ( ( entry_ptr->aux_next == NULL ) &&
+                         ( cache_ptr->dLRU_tail_ptr != entry_ptr ) )
+                        HDfprintf(stdout,
+                  "%s: entry_ptr->aux_next == NULL && dLRU_tail_ptr != entry_ptr\n",
+                  fcn_name);
+
+                    if ( ( cache_ptr->dLRU_list_len == 1 ) &&
+                         ( ! ( ( cache_ptr->dLRU_head_ptr == entry_ptr ) &&
+                               ( cache_ptr->dLRU_tail_ptr == entry_ptr ) &&
+                               ( entry_ptr->aux_next == NULL ) &&
+                               ( entry_ptr->aux_prev == NULL ) &&
+                               ( cache_ptr->dLRU_list_size == entry_ptr->size )
+                             )
+                           )
+                       )
+                    {
+                        HDfprintf(stdout,
+                                  "%s: single entry dlru sanity check fails\n",
+                                  fcn_name);
+                    }
+
+                }
+                else
+                {
+                    if ( cache_ptr->cLRU_head_ptr == NULL )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->cLRU_head_ptr == NULL.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->cLRU_tail_ptr == NULL )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->cLRU_tail_ptr == NULL.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->cLRU_list_len <= 0 )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->cLRU_list_len <= 0.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->cLRU_list_size <= 0 )
+                        HDfprintf(stdout,
+                                  "%s: cache_ptr->cLRU_list_size <= 0.\n",
+                                  fcn_name);
+
+                    if ( cache_ptr->cLRU_list_size < entry_ptr->size )
+                        HDfprintf(stdout,
+                              "%s: cache_ptr->cLRU_list_size < entry_ptr->size.\n",
+                              fcn_name);
+
+                    if ( ( (cache_ptr->cLRU_list_size) == entry_ptr->size ) &&
+                         ( ! ( (cache_ptr->cLRU_list_len) == 1 ) ) )
+                        HDfprintf(stdout,
+                              "%s: cLRU_list_size == size && cLRU_list_len != 1\n",
+                              fcn_name);
+
+                    if ( ( entry_ptr->aux_prev == NULL ) &&
+                         ( cache_ptr->cLRU_head_ptr != entry_ptr ) )
+                        HDfprintf(stdout, "%s: entry_ptr->aux_prev == NULL && cLRU_head_ptr != entry_ptr\n", fcn_name);
+
+                    if ( ( entry_ptr->aux_next == NULL ) &&
+                         ( cache_ptr->cLRU_tail_ptr != entry_ptr ) )
+                        HDfprintf(stdout, "%s: entry_ptr->aux_next == NULL && cLRU_tail_ptr != entry_ptr\n", fcn_name);
+
+                    if ( ( cache_ptr->cLRU_list_len == 1 ) &&
+                         ( ! ( ( cache_ptr->cLRU_head_ptr == entry_ptr ) &&
+                               ( cache_ptr->cLRU_tail_ptr == entry_ptr ) &&
+                               ( entry_ptr->aux_next == NULL ) &&
+                               ( entry_ptr->aux_prev == NULL ) &&
+                               ( cache_ptr->cLRU_list_size == entry_ptr->size )
+                             )
+                           )
+                       )
+                    {
+                        HDfprintf(stdout,
+                                  "%s: single entry clru sanity check fails\n",
+                                  fcn_name);
+                    }
+                }
+            }
+#endif /* JRM */
+
+            H5C2__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+
+        } else {
+            /* We are either doing a flush or a clear.
+             *
+             * A clear and a flush are the same from the point of view of
+             * the replacement policy and the slist.  Hence no
+             * differentiation between them.
+             *                                              JRM -- 7/7/07
+             */
+
+            H5C2__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
+
+            if ( entry_ptr->in_slist )
+            {
+                H5C2__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+            }
+        }
+
+        /* Clear the dirty flag only, if requested */
+        if ( clear_only )
+        {
+            if ( entry_ptr->is_dirty )
+            {
+#ifndef NDEBUG
+                /* only call the clear_dirty_bits callback if debugging
+                 * is enabled.
+                 */
+                if ( entry_ptr->type->clear_dirty_bits(addr,
+                                                       entry_ptr->size,
+                                                       (void *)entry_ptr)
+                     != SUCCEED )
+                {
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                "clear_dirty_bits() failed.")
+                }
+#endif /* NDEBUG */
+            }
+            entry_ptr->is_dirty = FALSE;
+
+        }
+        else if ( entry_ptr->is_dirty )
+        {
+            /* The entry is dirty, and we are doing either a flush,
+             * or a flush destroy.  In either case, serialize the
+             * entry and write it to disk.
+             *
+             * If the entry is clean, we do nothing at this point.
+             *
+             * If the serialize function changes the size or location
+             * of the entry, and we are not doing a flush destroy, we
+             * will have to touch up the cache to account for the
+             * change(s).
+             */
+
+#if H5C2_DO_SANITY_CHECKS
+            if ( ( cache_ptr->check_write_permitted == NULL ) &&
+                 ( ! (cache_ptr->write_permitted) ) ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "Write when writes are always forbidden!?!?!")
+            }
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+            /* allocate the on disk image buffer lazily, the first time
+             * the entry is flushed.
+             */
+            if ( entry_ptr->image_ptr == NULL )
+            {
+                entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size);
+
+                if ( entry_ptr->image_ptr == NULL )
+                {
+
+                    HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+                               "memory allocation failed for on disk image buffer.")
+                }
+            }
+
+            if ( entry_ptr->type->serialize(entry_ptr->addr,
+                                            entry_ptr->size,
+                                            entry_ptr->image_ptr,
+                                            (void *)entry_ptr,
+                                            &serialize_flags,
+                                            &new_addr,
+                                            &new_len,
+                                            &new_image_ptr) != SUCCEED )
+            {
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "unable to serialize entry")
+            }
+
+            if ( serialize_flags != 0 )
+            {
+                if ( destroy )
+                {
+                    /* We have already removed the entry from the
+                     * cache's data structures, so no need to update
+                     * them for the re-size and/or rename.  All we need
+                     * to do is update the cache entry so we will have
+                     * the correct values when we actually write the
+                     * image of the entry to disk.
+                     *
+                     * Note that if the serialize function changes the
+                     * size of the disk image of the entry, it must
+                     * deallocate the old image, and allocate a new.
+                     */
+
+                    switch ( serialize_flags )
+                    {
+                        case H5C2__SERIALIZE_RESIZED_FLAG:
+                            H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr,
+                                                                     entry_ptr,
+                                                                     new_len)
+                            entry_ptr->size = new_len;
+                            entry_ptr->image_ptr = new_image_ptr;
+                            break;
+
+                        case (H5C2__SERIALIZE_RESIZED_FLAG |
+                              H5C2__SERIALIZE_RENAMED_FLAG):
+                            H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr,
+                                                                     entry_ptr,
+                                                                     new_len)
+                            entry_ptr->addr = new_addr;
+                            entry_ptr->size = new_len;
+                            entry_ptr->image_ptr = new_image_ptr;
+                            break;
+
+                        default:
+                            /* note that a rename without a resize is not
+                             * an expected combination.
+                             */
+                            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                        "unexpected serialize flag(s)")
+                            break;
+                    }
+                }
+                else
+                {
+                    /* The entry is not being destroyed, and thus has not
+                     * been removed from the cache's data structures.
+                     *
+                     * Thus, in addition to updating the entry for the
+                     * re-size and/or rename, we must also update the
+                     * cache data structures.
+                     */
+
+                    switch ( serialize_flags )
+                    {
+                        case H5C2__SERIALIZE_RESIZED_FLAG:
+                            H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr,
+                                                                     entry_ptr,
+                                                                     new_len)
+                            /* The replacement policy code thinks the
+                             * entry is already clean, so modify is_dirty
+                             * to meet this expectation.
+                             */
+                            entry_ptr->is_dirty = FALSE;
+
+                            /* update the hash table for the size change*/
+                            H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), \
+                                                             (entry_ptr->size),\
+                                                             (new_len));
+
+                            /* The entry can't be protected since we are in
+                             * the process of flushing it.  Thus we must
+                             * update the replacement policy data structures
+                             * for the size change.  The macro deals with
+                             * the pinned case.
+                             */
+                            H5C2__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, \
+                                                            entry_ptr, \
+                                                            new_len);
+
+                            /* The entry can't be in the slist, so no need
+                             * to update the slist for the size change.
+                             */
+
+                            /* finally, set is_dirty to TRUE again, and
+                             * update the size and image_ptr.
+                             */
+                            entry_ptr->is_dirty = TRUE;
+                            entry_ptr->size = new_len;
+                            entry_ptr->image_ptr = new_image_ptr;
+                            break;
+
+                        case (H5C2__SERIALIZE_RESIZED_FLAG |
+                              H5C2__SERIALIZE_RENAMED_FLAG):
+                            H5C2__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr,
+                                                                     entry_ptr,
+                                                                     new_len)
+                            /* The replacement policy code thinks the
+                             * entry is already clean, so modify is_dirty
+                             * to meet this expectation.
+                             */
+                            entry_ptr->is_dirty = FALSE;
+
+                            /* first update the hash table for the rename */
+                            H5C2__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+                            entry_ptr->addr = new_addr;
+                            H5C2__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+
+                            /* update the hash table for the size change */
+                            H5C2__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), \
+                                                             (entry_ptr->size),\
+                                                             (new_len));
+
+                            /* The entry can't be protected since we are in
+                             * the process of flushing it.  Thus we must
+                             * update the replacement policy data structures
+                             * for the size change.  The macro deals with
+                             * the pinned case.
+                             */
+                            H5C2__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, \
+                                                            entry_ptr, \
+                                                            new_len);
+
+                            /* The entry can't be in the slist, so no need
+                             * to update the slist for the size change.
+                             */
+
+                            /* finally, set is_dirty to TRUE again, and
+                             * update the size and image_ptr.
+                             */
+                            entry_ptr->is_dirty = TRUE;
+
+                            entry_ptr->size = new_len;
+                            entry_ptr->image_ptr = new_image_ptr;
+                            break;
+
+                        default:
+                            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                        "unexpected serialize flag(s)")
+                            break;
+                    }
+                }
+            }
+
+            /* now write the image to disk.
+             *
+             * Note that we use entry_ptr->type here rather than type_ptr,
+             * as the function's contract permits type_ptr to be NULL --
+             * see the guard on entry to this clause.  When type_ptr is
+             * supplied, the two are interchangeable, as the guard has
+             * already verified that the ids match.
+             */
+            if ( H5F_block_write(f, entry_ptr->type->mem_type, entry_ptr->addr,
+                                 entry_ptr->size, dxpl_id,
+                                 entry_ptr->image_ptr) < 0 )
+            {
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "Can't write image to file.")
+            }
+
+#ifdef H5_HAVE_PARALLEL
+            if ( serialize_flags != 0 ) {
+
+                /* In the parallel case, resizes and renames in
+                 * the serialize operation can cause problems.
+                 * If they occur, scream and die.
+                 *
+                 * At present, in the parallel case, the aux_ptr
+                 * will only be set if there is more than one
+                 * process.  Thus we can use this to detect
+                 * the parallel case.
+                 *
+                 * This works for now, but if we start using the
+                 * aux_ptr for other purposes, we will have to
+                 * change this test accordingly.
+                 *
+                 * NB: While this test detects entries that attempt
+                 *     to resize or rename themselves during a flush
+                 *     in the parallel case, it will not detect an
+                 *     entry that dirties, resizes, and/or renames
+                 *     other entries during its flush.
+                 *
+                 *     From what Quincey tells me, this test is
+                 *     sufficient for now, as any flush routine that
+                 *     does the latter will also do the former.
+                 *
+                 *     If that ceases to be the case, further
+                 *     tests will be necessary.
+                 */
+                if ( cache_ptr->aux_ptr != NULL ) {
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "resize/rename in serialize occurred in parallel case.")
+
+                }
+            }
+#endif /* H5_HAVE_PARALLEL */
+
+            entry_ptr->is_dirty = FALSE;
+        }
+
+        if ( destroy ) /* time to discard the entry */
+        {
+            /* start by freeing the buffer for the on disk image */
+            if ( entry_ptr->image_ptr != NULL ) {
+
+                entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+
+                if ( entry_ptr->image_ptr != NULL ) {
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                "*image_ptr free failed.")
+                }
+            }
+
+            /* we are about to discard the in core representation --
+             * set the magic field to bad magic so we can detect a
+             * freed entry if we see one.
+             */
+            entry_ptr->magic = H5C2__H5C2_CACHE_ENTRY_T_BAD_MAGIC;
+
+            /* as above, use entry_ptr->type rather than type_ptr, as
+             * type_ptr may be NULL.
+             */
+            if ( entry_ptr->type->free_icr(entry_ptr->addr, entry_ptr->size,
+                                           (void *)entry_ptr) != SUCCEED )
+            {
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "free_icr callback failed.")
+            }
+        }
+        else /* just flushing or clearing */
+        {
+            entry_ptr->flush_in_progress = FALSE;
+        }
+
+        if ( cache_ptr->log_flush ) {
+
+            /* JRM */ /* may want to update this */
+            /* note that addr, was_dirty, and type_id were all captured
+             * before the entry could be destroyed above.
+             */
+            status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
+                                            flags, type_id);
+
+            if ( status < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "log_flush callback failed.")
+            }
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_flush_single_entry() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_load_entry
+ *
+ * Purpose:     Attempt to load the entry at the specified disk address
+ *		and with the specified type into memory.  If successful,
+ * return the in memory address of the entry. Return NULL
+ * on failure.
+ *
+ * Note that this function simply loads the entry into
+ * core. It does not insert it into the cache.
+ *
+ * Return: Non-NULL on success / NULL on failure.
+ *
+ * Programmer: John Mainzer, 5/18/04
+ *
+ * Modifications:
+ *
+ * JRM - 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM - 6/23/06
+ * Deleted assertion that verified that a newly loaded
+ * entry is clean. Due to a bug fix, this need not be
+ * the case, as our code will attempt to repair errors
+ * on load.
+ *
+ * JRM - 8/21/06
+ * Added initialization for the new flush_in_progress and
+ * destroy in progress fields.
+ *
+ * JRM - 3/29/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM - 6/25/07
+ * Reworked function to use the new client callback
+ * functions that are needed to implement metadata
+ * journaling. Removed skip_file_checks parameter.
+ *
+ * JRM -- 10/12/07
+ * Added initialization for the new magic field.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void *
+H5C2_load_entry(H5F_t * f,
+                hid_t dxpl_id,
+                const H5C2_class_t * type,
+                haddr_t addr,
+                size_t len,
+                hbool_t chk_len,
+                const void * udata_ptr)
+{
+    /* const char * fcn_name = "H5C2_load_entry()"; */
+    hbool_t		dirty = FALSE;  /* set by the deserialize callback */
+    haddr_t		abs_eoa;	/* Absolute end of file address	*/
+    haddr_t		rel_eoa;	/* Relative end of file address	*/
+    void *		image_ptr = NULL;  /* buffer for the on disk image */
+    void *		thing = NULL;	/* in core representation of entry */
+    void *		tmp_thing;	/* scratch ptr for realloc of thing */
+    void *		ret_value = NULL;
+    H5C2_cache_entry_t *	entry_ptr = NULL;
+    size_t		new_len;	/* actual len reported by image_len() */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2_load_entry)
+
+    HDassert( f );
+    HDassert( type );
+    HDassert( H5F_addr_defined(addr) );
+    HDassert( len > 0 );
+    HDassert( type->deserialize );
+    /* the image_len callback is only required for speculative reads */
+    HDassert( ( ! chk_len ) || ( type->image_len ) );
+
+    /* Make certain we don't speculatively read off the end of the file */
+    if ( HADDR_UNDEF == (abs_eoa = H5F_get_eoa(f)) )
+    {
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
+                    "unable to determine file size")
+    }
+
+    /* Adjust absolute EOA address to relative EOA address */
+    rel_eoa = abs_eoa - H5F_get_base_addr(f);
+
+    HDassert( rel_eoa > addr );
+
+    if ( (rel_eoa - addr) < (haddr_t)len )
+    {
+        /* either the client is speculatively reading beyond the end of
+         * file, or there is a major screw up.  Adjust the len in the
+         * former case, and scream and die in the latter
+         */
+        if ( chk_len ) /* i.e. possible speculative read beyond eof */
+        {
+            /* Quincey:  Did I use this correctly?  In the example I
+             *           stole it from, the from type was hsize_t even though
+             *           the source was a haddr_t.  I changed the from to match
+             *           the source.  Is this as it should be?
+             */
+                                                        /* JRM */
+            H5_ASSIGN_OVERFLOW(len, (rel_eoa - addr), \
+                               /* from: */ haddr_t, /* to: */ size_t);
+            HDassert( len > 0 );
+        }
+        else
+        {
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
+                        "tried to read beyond eof")
+
+        }
+    }
+
+    image_ptr = H5MM_malloc(len);
+
+    if ( image_ptr == NULL )
+    {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, \
+                    "memory allocation failed for on disk image buffer.")
+    }
+
+    if ( H5F_block_read(f, type->mem_type, addr, len, dxpl_id, image_ptr) < 0 )
+    {
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't read image")
+    }
+
+    thing = type->deserialize(addr, len, image_ptr, udata_ptr, &dirty);
+
+    if ( thing == NULL )
+    {
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
+    }
+
+    if ( chk_len )
+    {
+        /* the read was speculative -- ask the client for the actual
+         * length of the entry, and trim our in core representation
+         * to match if it is shorter.
+         */
+        if ( type->image_len(thing, &new_len) != SUCCEED )
+        {
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "image_len() failed")
+        }
+        else if ( new_len > len )
+        {
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "new_len > len")
+        }
+        else if ( new_len <= 0 )
+        {
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "new_len <= 0")
+        }
+        else if ( new_len < len )
+        {
+            /* Use a scratch pointer for the realloc so the original
+             * allocation is not leaked if H5MM_realloc() fails.
+             */
+            tmp_thing = H5MM_realloc(thing, new_len);
+
+            if ( tmp_thing == NULL )
+            {
+                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, \
+                            "thing null after H5MM_realloc().")
+            }
+            else
+            {
+                thing = tmp_thing;
+                len = new_len;
+            }
+        }
+    }
+
+    entry_ptr = (H5C2_cache_entry_t *)thing;
+
+    /* In general, an entry should be clean just after it is loaded.
+     *
+     * However, when this code is used in the metadata cache, it is
+     * possible that object headers will be dirty at this point, as
+     * the deserialize function will alter object headers if necessary to
+     * fix an old bug.
+     *
+     * In the following assert:
+     *
+     * 	HDassert( ( dirty == FALSE ) || ( type->id == 4 ) );
+     *
+     * note that type id 4 is associated with object headers in the metadata
+     * cache.
+     *
+     * When we get to using H5C2 for other purposes, we may wish to
+     * tighten up the assert so that the loophole only applies to the
+     * metadata cache.
+     */
+
+    HDassert( ( dirty == FALSE ) || ( type->id == 4 ) );
+
+    /* check len rather than entry_ptr->size here, as the size field of
+     * *entry_ptr has not been initialized yet -- it is set to len just
+     * below.
+     */
+    HDassert( len < H5C2_MAX_ENTRY_SIZE );
+
+    /* initialize the cache specific fields of the new entry */
+    entry_ptr->magic = H5C2__H5C2_CACHE_ENTRY_T_MAGIC;
+    entry_ptr->addr = addr;
+    entry_ptr->size = len;
+    entry_ptr->image_ptr = image_ptr;
+    entry_ptr->type = type;
+    entry_ptr->is_dirty = dirty;
+    entry_ptr->dirtied = FALSE;
+    entry_ptr->is_protected = FALSE;
+    entry_ptr->is_read_only = FALSE;
+    entry_ptr->ro_ref_count = 0;
+    entry_ptr->is_pinned = FALSE;
+    entry_ptr->in_slist = FALSE;
+    entry_ptr->flush_marker = FALSE;
+#ifdef H5_HAVE_PARALLEL
+    entry_ptr->clear_on_unprotect = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+    entry_ptr->flush_in_progress = FALSE;
+    entry_ptr->destroy_in_progress = FALSE;
+
+    entry_ptr->ht_next = NULL;
+    entry_ptr->ht_prev = NULL;
+
+    entry_ptr->next = NULL;
+    entry_ptr->prev = NULL;
+
+    entry_ptr->aux_next = NULL;
+    entry_ptr->aux_prev = NULL;
+
+    H5C2__RESET_CACHE_ENTRY_STATS(entry_ptr);
+
+    ret_value = thing;
+
+done:
+
+    if ( ret_value == NULL ) {
+
+        /* We hit an error -- discard anything allocated above, as
+         * ownership has not passed to the caller.  On success, both
+         * image_ptr and thing are owned by the new cache entry.
+         */
+        if ( thing != NULL ) {
+
+            (void)(type->free_icr(addr, len, thing));
+            thing = NULL;
+        }
+
+        if ( image_ptr != NULL ) {
+
+            image_ptr = H5MM_xfree(image_ptr);
+        }
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_load_entry() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_make_space_in_cache
+ *
+ * Purpose: Attempt to evict cache entries until the index_size
+ * is at least needed_space below max_cache_size.
+ *
+ * In passing, also attempt to bring cLRU_list_size to a
+ * value greater than min_clean_size.
+ *
+ * Depending on circumstances, both of these goals may
+ * be impossible, as in parallel mode, we must avoid generating
+ * a write as part of a read (to avoid deadlock in collective
+ * I/O), and in all cases, it is possible (though hopefully
+ * highly unlikely) that the protected list may exceed the
+ * maximum size of the cache.
+ *
+ * Thus the function simply does its best, returning success
+ * unless an error is encountered.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the call (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/14/04
+ *
+ * Modifications:
+ *
+ * JRM --7/21/04
+ * Minor modifications in support of the addition of a hash
+ * table to facilitate lookups.
+ *
+ * JRM -- 11/22/04
+ * Added the first_flush_ptr parameter, which replaces the
+ * old first_flush local variable. This allows the function
+ * to coordinate on the first flush issue with other functions.
+ *
+ * JRM -- 12/13/04
+ * Added code to skip over epoch markers if present.
+ *
+ * JRM -- 1/3/06
+ * Modified function to work correctly when the cache
+ * is not full. This case occurs when we need to flush to
+ * min clean size before the cache has filled.
+ *
+ * JRM -- 3/29/07
+ * Added sanity checks using the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 8/24/07
+ * Reworked parameter list and code for the removal of the
+ * secondary dxpl id, and the decision to store the file
+ * pointer f in *cache_ptr.
+ *
+ * JRM -- 10/12/07
+ * Added code to detect the case in which the LRU list has
+ * been modified by a serialize callback, and cause the
+ * function to re-start its scan at the tail of the LRU.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C2_make_space_in_cache(hid_t dxpl_id,
+                         H5C2_t * cache_ptr,
+                         size_t space_needed,
+                         hbool_t write_permitted)
+{
+    /* const char * fcn_name = "H5C2_make_space_in_cache()"; */
+    herr_t ret_value = SUCCEED; /* Return value */
+    herr_t result;
+    int32_t entries_examined = 0;
+    int32_t initial_list_len;
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+    size_t empty_space;
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+    hbool_t prev_is_dirty = FALSE;
+    H5C2_cache_entry_t * entry_ptr;
+    H5C2_cache_entry_t * prev_ptr;
+    H5C2_cache_entry_t * next_ptr;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2_make_space_in_cache)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    if ( write_permitted ) {
+
+        /* Writes are permitted -- scan the LRU list from the tail,
+         * flushing dirty entries and evicting clean ones, until the
+         * index can accommodate space_needed additional bytes, we run
+         * off the end of the list, or we hit the scan limit.
+         *
+         * The limit of twice the initial list length allows for the
+         * fact that serialize callbacks may modify the LRU while the
+         * scan is in progress (see the restart logic below).
+         */
+        initial_list_len = cache_ptr->LRU_list_len;
+        entry_ptr = cache_ptr->LRU_tail_ptr;
+
+        while ( ( (cache_ptr->index_size + space_needed)
+                  >
+                  cache_ptr->max_cache_size
+                )
+                &&
+                ( entries_examined <= (2 * initial_list_len) )
+                &&
+                ( entry_ptr != NULL )
+              )
+        {
+            HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );
+
+            /* Snapshot the neighbors (and the predecessor's dirty
+             * state) before the flush -- the serialize callback may
+             * modify the LRU, and these values let us detect that
+             * after the flush.
+             */
+            next_ptr = entry_ptr->next;
+            prev_ptr = entry_ptr->prev;
+
+            if ( prev_ptr != NULL ) {
+
+                prev_is_dirty = prev_ptr->is_dirty;
+            }
+
+            if ( (entry_ptr->type)->id != H5C2__EPOCH_MARKER_TYPE ) {
+
+                if ( entry_ptr->is_dirty ) {
+
+                    /* Dirty entry -- flush it to disk, leaving it in
+                     * the cache (now clean, and hence evictable).
+                     */
+                    result = H5C2_flush_single_entry(cache_ptr->f,
+                                                     dxpl_id,
+                                                     cache_ptr,
+                                                     entry_ptr->type,
+                                                     entry_ptr->addr,
+                                                     H5C2__NO_FLAGS_SET,
+                                                     FALSE);
+                } else {
+
+                    /* Clean entry -- evict it outright. */
+                    result =
+                        H5C2_flush_single_entry(cache_ptr->f,
+                                                dxpl_id,
+                                                cache_ptr,
+                                                entry_ptr->type,
+                                                entry_ptr->addr,
+                                                H5C2__FLUSH_INVALIDATE_FLAG,
+                                                TRUE);
+                }
+            } else {
+
+                /* Skip epoch markers. Set result to SUCCEED to avoid
+                 * triggering the error code below.
+                 */
+                result = SUCCEED;
+            }
+
+            if ( result < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "unable to flush entry 1")
+            }
+
+            if ( prev_ptr != NULL ) {
+
+                if ( prev_ptr->magic != H5C2__H5C2_CACHE_ENTRY_T_MAGIC ) {
+
+                    /* something horrible has happened to *prev_ptr --
+                     * scream and die.
+                     */
+                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                                "*prev_ptr corrupt 1")
+
+                } else if ( ( prev_ptr->is_dirty != prev_is_dirty )
+                            ||
+                            ( prev_ptr->next != next_ptr )
+                            ||
+                            ( prev_ptr->is_protected )
+                            ||
+                            ( prev_ptr->is_pinned ) ) {
+
+                    /* something has happened to the LRU -- start over
+                     * from the tail.
+                     */
+                    entry_ptr = cache_ptr->LRU_tail_ptr;
+
+                } else {
+
+                    entry_ptr = prev_ptr;
+
+                }
+            } else {
+
+                entry_ptr = NULL;
+
+            }
+
+            entries_examined++;
+
+        }
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+        /* Second pass: try to bring the clean LRU list size up to
+         * min_clean_size by flushing entries on the dirty LRU list.
+         *
+         * NOTE(review): entries_examined is not reset before this scan,
+         * so the ( entries_examined <= initial_list_len ) bound below
+         * also counts entries examined in the scan above -- confirm
+         * this is intended.
+         */
+        initial_list_len = cache_ptr->dLRU_list_len;
+        entry_ptr = cache_ptr->dLRU_tail_ptr;
+
+        /* Unused space below max_cache_size counts toward the clean
+         * space target, as it can absorb reads without a write.
+         */
+        if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
+
+            empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+        } else {
+
+            empty_space = 0;
+        }
+
+        while ( ( (cache_ptr->cLRU_list_size + empty_space)
+                  < cache_ptr->min_clean_size ) &&
+                ( entries_examined <= initial_list_len ) &&
+                ( entry_ptr != NULL )
+              )
+        {
+            HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );
+            HDassert( entry_ptr->is_dirty );
+            HDassert( entry_ptr->in_slist );
+
+            /* Snapshot the neighbors on the dirty LRU before the
+             * flush, so we can detect callback-induced changes below.
+             */
+            prev_ptr = entry_ptr->aux_prev;
+
+            next_ptr = entry_ptr->aux_next;
+
+            if ( prev_ptr != NULL ) {
+
+                HDassert( prev_ptr->is_dirty );
+            }
+
+            /* Flush (but do not evict) the dirty entry -- this moves
+             * it to the clean LRU list.
+             */
+            result = H5C2_flush_single_entry(cache_ptr->f,
+                                             dxpl_id,
+                                             cache_ptr,
+                                             entry_ptr->type,
+                                             entry_ptr->addr,
+                                             H5C2__NO_FLAGS_SET,
+                                             FALSE);
+
+            if ( result < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "unable to flush entry 2")
+            }
+
+            if ( prev_ptr != NULL ) {
+
+                if (prev_ptr->magic != H5C2__H5C2_CACHE_ENTRY_T_MAGIC) {
+
+                    /* something horrible has happened to *prev_ptr --
+                     * scream and die.
+                     */
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                                "*prev_ptr corrupt 2")
+
+                } else if ( ( ! ( prev_ptr->is_dirty ) )
+                            ||
+                            ( prev_ptr->aux_next != next_ptr )
+                            ||
+                            ( prev_ptr->is_protected )
+                            ||
+                            ( prev_ptr->is_pinned ) ) {
+
+                    /* something has happened to the dirty LRU -- start over
+                     * from the tail.
+                     */
+
+#if 0 /* This debugging code may be useful in the future -- keep it for now. */
+                    if ( ! ( prev_ptr->is_dirty ) ) {
+                        HDfprintf(stdout, "%s: ! prev_ptr->is_dirty\n",
+                                  fcn_name);
+                    }
+                    if ( prev_ptr->aux_next != next_ptr ) {
+                        HDfprintf(stdout, "%s: prev_ptr->next != next_ptr\n",
+                                  fcn_name);
+                    }
+                    if ( prev_ptr->is_protected ) {
+                        HDfprintf(stdout, "%s: prev_ptr->is_protected\n",
+                                  fcn_name);
+                    }
+                    if ( prev_ptr->is_pinned ) {
+                        HDfprintf(stdout, "%s:prev_ptr->is_pinned\n",
+                                  fcn_name);
+                    }
+
+                    HDfprintf(stdout, "%s: re-starting scan of dirty list\n",
+                              fcn_name);
+#endif /* JRM */
+                    entry_ptr = cache_ptr->dLRU_tail_ptr;
+
+                } else {
+
+                    entry_ptr = prev_ptr;
+
+                }
+            } else {
+
+                entry_ptr = NULL;
+
+            }
+
+            entries_examined++;
+        }
+
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+    } else {
+
+        /* Writes are not permitted (e.g. a read in parallel mode, where
+         * a write could deadlock collective I/O) -- we may only evict
+         * clean entries, so scan the clean LRU list.
+         */
+        HDassert( H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS );
+
+        initial_list_len = cache_ptr->cLRU_list_len;
+        entry_ptr = cache_ptr->cLRU_tail_ptr;
+
+        while ( ( (cache_ptr->index_size + space_needed)
+                  >
+                  cache_ptr->max_cache_size
+                )
+                &&
+                ( entries_examined <= initial_list_len )
+                &&
+                ( entry_ptr != NULL )
+              )
+        {
+            HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );
+            HDassert( ! (entry_ptr->is_dirty) );
+
+            prev_ptr = entry_ptr->aux_prev;
+
+            /* Evict the clean entry. */
+            result = H5C2_flush_single_entry(cache_ptr->f,
+                                             dxpl_id,
+                                             cache_ptr,
+                                             entry_ptr->type,
+                                             entry_ptr->addr,
+                                             H5C2__FLUSH_INVALIDATE_FLAG,
+                                             TRUE);
+
+            if ( result < 0 ) {
+
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                            "unable to flush entry")
+            }
+
+            /* we are scanning the clean LRU, so the serialize function
+             * will not be called on any entry -- thus there is no
+             * concern about the list being modified out from under
+             * this function.
+             */
+
+            entry_ptr = prev_ptr;
+            entries_examined++;
+        }
+    }
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_make_space_in_cache() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_validate_lru_list
+ *
+ * Purpose: Debugging function that scans the LRU list for errors.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 7/14/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+
+static herr_t
+H5C2_validate_lru_list(H5C2_t * cache_ptr)
+{
+    herr_t               ret_value = SUCCEED;  /* Return value */
+    int32_t              entry_count = 0;      /* entries seen during the scan */
+    size_t               accum_size = 0;       /* bytes seen during the scan */
+    H5C2_cache_entry_t * node = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2_validate_lru_list)
+
+    HDassert( cache_ptr );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+
+    /* Check 1: the head and tail pointers must either both be NULL
+     * (empty list) or both be non-NULL.
+     */
+    if ( ( ( cache_ptr->LRU_head_ptr == NULL ) ||
+           ( cache_ptr->LRU_tail_ptr == NULL ) ) &&
+         ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr ) ) {
+
+        HDfprintf(stdout,"H5C2_validate_lru_list: Check 1 failed.\n");
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+    }
+
+    /* Check 2: the bookkeeping length and size must be non-negative. */
+    if ( ( cache_ptr->LRU_list_len < 0 ) || ( cache_ptr->LRU_list_size < 0 ) ) {
+
+        HDfprintf(stdout,"H5C2_validate_lru_list: Check 2 failed.\n");
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+    }
+
+    /* Check 3: a singleton list must have head == tail != NULL, and
+     * that one entry must account for the entire list size.
+     */
+    if ( ( cache_ptr->LRU_list_len == 1 ) &&
+         ( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr ) ||
+           ( cache_ptr->LRU_head_ptr == NULL ) ||
+           ( cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size ) ) ) {
+
+        HDfprintf(stdout,"H5C2_validate_lru_list: Check 3 failed.\n");
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+    }
+
+    /* Check 4: in any non-empty list, both ends must exist and must
+     * terminate the list (head->prev == NULL, tail->next == NULL).
+     */
+    if ( ( cache_ptr->LRU_list_len >= 1 ) &&
+         ( ( cache_ptr->LRU_head_ptr == NULL ) ||
+           ( cache_ptr->LRU_head_ptr->prev != NULL ) ||
+           ( cache_ptr->LRU_tail_ptr == NULL ) ||
+           ( cache_ptr->LRU_tail_ptr->next != NULL ) ) ) {
+
+        HDfprintf(stdout,"H5C2_validate_lru_list: Check 4 failed.\n");
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+    }
+
+    /* Walk the list, verifying the forward and backward links of every
+     * node, and accumulating the observed length and size.
+     */
+    for ( node = cache_ptr->LRU_head_ptr; node != NULL; node = node->next )
+    {
+        /* Check 5: every node but the head must be correctly linked to
+         * its predecessor.
+         */
+        if ( ( node != cache_ptr->LRU_head_ptr ) &&
+             ( ( node->prev == NULL ) ||
+               ( node->prev->next != node ) ) ) {
+
+            HDfprintf(stdout,"H5C2_validate_lru_list: Check 5 failed.\n");
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+        }
+
+        /* Check 6: every node but the tail must be correctly linked to
+         * its successor.
+         */
+        if ( ( node != cache_ptr->LRU_tail_ptr ) &&
+             ( ( node->next == NULL ) ||
+               ( node->next->prev != node ) ) ) {
+
+            HDfprintf(stdout,"H5C2_validate_lru_list: Check 6 failed.\n");
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+        }
+
+        entry_count++;
+        accum_size += node->size;
+    }
+
+    /* Check 7: the observed totals must match the bookkeeping fields. */
+    if ( ( cache_ptr->LRU_list_len != entry_count ) ||
+         ( cache_ptr->LRU_list_size != accum_size ) ) {
+
+        HDfprintf(stdout,"H5C2_validate_lru_list: Check 7 failed.\n");
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+    }
+
+done:
+
+    /* This is debugging code -- die loudly if any check failed. */
+    if ( ret_value != SUCCEED ) {
+
+        HDassert(0);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_validate_lru_list() */
+
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C2_verify_not_in_index
+ *
+ * Purpose: Debugging function that scans the hash table to verify
+ * that the specified instance of H5C2_cache_entry_t is not
+ * present.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 7/14/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C2_DO_EXTREME_SANITY_CHECKS
+
+static herr_t
+H5C2_verify_not_in_index(H5C2_t * cache_ptr,
+                         H5C2_cache_entry_t * entry_ptr)
+{
+    herr_t               ret_value = SUCCEED;  /* Return value */
+    int32_t              bucket;               /* hash table bucket index */
+    int32_t              chain_pos;            /* position within a hash chain */
+    H5C2_cache_entry_t * chain_ptr = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT(H5C2_verify_not_in_index)
+
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( entry_ptr != NULL );
+
+    /* Walk every chain of the hash table, and fail if the target entry
+     * turns up anywhere.
+     */
+    for ( bucket = 0; bucket < H5C2__HASH_TABLE_LEN; bucket++ )
+    {
+        chain_pos = 0;
+
+        for ( chain_ptr = cache_ptr->index[bucket];
+              chain_ptr != NULL;
+              chain_ptr = chain_ptr->ht_next )
+        {
+            if ( chain_ptr == entry_ptr ) {
+
+                HDfprintf(stdout,
+                          "H5C2_verify_not_in_index: entry in index (%d/%d)\n",
+                          bucket, chain_pos);
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                            "Entry already in index.")
+            }
+            chain_pos++;
+        }
+    }
+
+done:
+
+    /* This is debugging code -- die loudly if the entry was found. */
+    if ( ret_value != SUCCEED ) {
+
+        HDassert(0);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C2_verify_not_in_index() */
+
+#endif /* H5C2_DO_EXTREME_SANITY_CHECKS */
diff --git a/src/H5C2pkg.h b/src/H5C2pkg.h
new file mode 100644
index 0000000..a1753b4
--- /dev/null
+++ b/src/H5C2pkg.h
@@ -0,0 +1,949 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: John Mainzer -- 10/12/04
+ *
+ * Purpose: This file contains declarations which are normally visible
+ * only within the H5C2 package (just H5C2.c at present).
+ *
+ * Source files outside the H5C2 package should include
+ * H5C2private.h instead.
+ *
+ * The one exception to this rule is test/cache2.c. The test
+ * code is easier to write if it can look at the cache's
+ * internal data structures. Indeed, this is the main
+ * reason why this file was created.
+ */
+
+#ifndef H5C2_PACKAGE
+#error "Do not include this file outside the H5C2 package!"
+#endif
+
+#ifndef _H5C2pkg_H
+#define _H5C2pkg_H
+
+
+/* Get package's private header */
+#include "H5C2private.h"
+
+
+/* Get needed headers */
+#include "H5SLprivate.h" /* Skip lists */
+
+/* With the introduction of the fractal heap, it is now possible for
+ * entries to be dirtied, resized, and/or renamed in the flush callbacks.
+ * As a result, on flushes, it may be necessary to make multiple passes
+ * through the slist before it is empty. The H5C2__MAX_PASSES_ON_FLUSH
+ * #define is used to set an upper limit on the number of passes.
+ * The current value was obtained via personal communication with
+ * Quincey. I have applied a fudge factor of 2.
+ */
+
+#define H5C2__MAX_PASSES_ON_FLUSH 4
+
+
+#define H5C2__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
+
+
+/****************************************************************************
+ *
+ * structure H5C2_t
+ *
+ * Catchall structure for all variables specific to an instance of the cache.
+ *
+ * While the individual fields of the structure are discussed below, the
+ * following overview may be helpful.
+ *
+ * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
+ * the entry's disk address. While the H5TB_TREE is less efficient than
+ * hash table, it keeps the entries in address sorted order. As flushes
+ * in parallel mode are more efficient if they are issued in increasing
+ * address order, this is a significant benefit. Also the H5TB_TREE code
+ * was readily available, which reduced development time.
+ *
+ * While the cache was designed with multiple replacement policies in mind,
+ * at present only a modified form of LRU is supported.
+ *
+ * JRM - 4/26/04
+ *
+ * Profiling has indicated that searches in the instance of H5TB_TREE are
+ * too expensive. To deal with this issue, I have augmented the cache
+ * with a hash table in which all entries will be stored. Given the
+ * advantages of flushing entries in increasing address order, the TBBT
+ * is retained, but only dirty entries are stored in it. At least for
+ * now, we will leave entries in the TBBT after they are flushed.
+ *
+ * Note that index_size and index_len now refer to the total size of
+ * and number of entries in the hash table.
+ *
+ * JRM - 7/19/04
+ *
+ * The TBBT has since been replaced with a skip list. This change
+ * greatly predates this note.
+ *
+ * JRM - 9/26/05
+ *
+ * magic: Unsigned 32 bit integer always set to H5C2__H5C2_T_MAGIC.
+ * This field is used to validate pointers to instances of
+ * H5C2_t.
+ *
+ * f: Pointer to the instance of H5F_t associated with this
+ * instance of the metadata cache. This field is set at
+ * create, and then used until the file is closed (at which
+ * point, the cache will be shut down as well).
+ *
+ * flush_in_progress: Boolean flag indicating whether a flush is in
+ * progress.
+ *
+ * trace_file_ptr: File pointer pointing to the trace file, which is used
+ * to record cache operations for use in simulations and design
+ * studies. This field will usually be NULL, indicating that
+ * no trace file should be recorded.
+ *
+ * Since much of the code supporting the parallel metadata
+ * cache is in H5AC, we don't write the trace file from
+ * H5C2. Instead, H5AC reads the trace_file_ptr as needed.
+ *
+ * When we get to using H5C2 in other places, we may add
+ * code to write trace file data at the H5C2 level as well.
+ *
+ * aux_ptr: Pointer to void used to allow wrapper code to associate
+ * its data with an instance of H5C2_t. The H5C2 cache code
+ * sets this field to NULL, and otherwise leaves it alone.
+ *
+ * max_type_id: Integer field containing the maximum type id number assigned
+ * to a type of entry in the cache. All type ids from 0 to
+ * max_type_id inclusive must be defined. The names of the
+ * types are stored in the type_name_table discussed below, and
+ * indexed by the ids.
+ *
+ * type_name_table_ptr: Pointer to an array of pointer to char of length
+ * max_type_id + 1. The strings pointed to by the entries
+ * in the array are the names of the entry types associated
+ * with the indexing type IDs.
+ *
+ * max_cache_size: Nominal maximum number of bytes that may be stored in the
+ * cache. This value should be viewed as a soft limit, as the
+ * cache can exceed this value under the following circumstances:
+ *
+ * a) All entries in the cache are protected, and the cache is
+ * asked to insert a new entry. In this case the new entry
+ * will be created. If this causes the cache to exceed
+ * max_cache_size, it will do so. The cache will attempt
+ * to reduce its size as entries are unprotected.
+ *
+ * b) When running in parallel mode, the cache may not be
+ * permitted to flush a dirty entry in response to a read.
+ * If there are no clean entries available to evict, the
+ * cache will exceed its maximum size. Again the cache
+ * will attempt to reduce its size to the max_cache_size
+ * limit on the next cache write.
+ *
+ * c) When an entry increases in size, the cache may exceed
+ * the max_cache_size limit until the next time the cache
+ * attempts to load or insert an entry.
+ *
+ * min_clean_size: Nominal minimum number of clean bytes in the cache.
+ * The cache attempts to maintain this number of bytes of
+ * clean data so as to avoid case b) above. Again, this is
+ * a soft limit.
+ *
+ *
+ * In addition to the call back functions required for each entry, the
+ * cache requires the following call back functions for this instance of
+ * the cache as a whole:
+ *
+ * check_write_permitted: In certain applications, the cache may not
+ * be allowed to write to disk at certain time. If specified,
+ * the check_write_permitted function is used to determine if
+ * a write is permissible at any given point in time.
+ *
+ * If no such function is specified (i.e. this field is NULL),
+ * the cache uses the following write_permitted field to
+ * determine whether writes are permitted.
+ *
+ * write_permitted: If check_write_permitted is NULL, this boolean flag
+ * indicates whether writes are permitted.
+ *
+ * log_flush: If provided, this function is called whenever a dirty
+ * entry is flushed to disk.
+ *
+ *
+ * In cases where memory is plentiful, and performance is an issue, it
+ * is useful to disable all cache evictions, and thereby postpone metadata
+ * writes. The following field is used to implement this.
+ *
+ * evictions_enabled: Boolean flag that is initialized to TRUE. When
+ * this flag is set to FALSE, the metadata cache will not
+ * attempt to evict entries to make space for newly protected
+ * entries, and instead the cache will grow without limit.
+ *
+ * Needless to say, this feature must be used with care.
+ *
+ *
+ * The cache requires an index to facilitate searching for entries. The
+ * following fields support that index.
+ *
+ * index_len: Number of entries currently in the hash table used to index
+ * the cache.
+ *
+ * index_size: Number of bytes of cache entries currently stored in the
+ * hash table used to index the cache.
+ *
+ * This value should not be mistaken for footprint of the
+ * cache in memory. The average cache entry is small, and
+ * the cache has a considerable overhead. Multiplying the
+ * index_size by two should yield a conservative estimate
+ * of the cache's memory footprint.
+ *
+ * index: Array of pointer to H5C2_cache_entry_t of size
+ * H5C2__HASH_TABLE_LEN. At present, this value is a power
+ * of two, not the usual prime number.
+ *
+ * I hope that the variable size of cache elements, the large
+ * hash table size, and the way in which HDF5 allocates space
+ * will combine to avoid problems with periodicity. If so, we
+ * can use a trivial hash function (a bit-and and a 3 bit left
+ * shift) with some small savings.
+ *
+ * If not, it will become evident in the statistics. Changing
+ * to the usual prime number length hash table will require
+ * changing the H5C2__HASH_FCN macro and the deletion of the
+ * H5C2__HASH_MASK #define. No other changes should be required.
+ *
+ *
+ * When we flush the cache, we need to write entries out in increasing
+ * address order. An instance of a skip list is used to store dirty entries in
+ * sorted order. Whether it is cheaper to sort the dirty entries as needed,
+ * or to maintain the list is an open question. At a guess, it depends
+ * on how frequently the cache is flushed. We will see how it goes.
+ *
+ * For now at least, I will not remove dirty entries from the list as they
+ * are flushed. (this has been changed -- dirty entries are now removed from
+ * the skip list as they are flushed. JRM - 10/25/05)
+ *
+ * slist_len: Number of entries currently in the skip list
+ * used to maintain a sorted list of dirty entries in the
+ * cache.
+ *
+ * slist_size: Number of bytes of cache entries currently stored in the
+ * skip list used to maintain a sorted list of
+ * dirty entries in the cache.
+ *
+ * slist_ptr: pointer to the instance of H5SL_t used to maintain a sorted
+ * list of dirty entries in the cache. This sorted list has
+ * two uses:
+ *
+ * a) It allows us to flush dirty entries in increasing address
+ * order, which results in significant savings.
+ *
+ * b) It facilitates checking for adjacent dirty entries when
+ * attempting to evict entries from the cache. While we
+ * don't use this at present, I hope that this will allow
+ * some optimizations when I get to it.
+ *
+ * With the addition of the fractal heap, the cache must now deal with
+ * the case in which entries may be dirtied, renamed, or have their sizes
+ * changed during a flush. To allow sanity checks in this situation, the
+ * following two fields have been added. They are only compiled in when
+ * H5C2_DO_SANITY_CHECKS is TRUE.
+ *
+ * slist_len_increase: Number of entries that have been added to the
+ * slist since the last time this field was set to zero.
+ *
+ * slist_size_increase: Total size of all entries that have been added
+ * to the slist since the last time this field was set to
+ * zero.
+ *
+ *
+ * When a cache entry is protected, it must be removed from the LRU
+ * list(s) as it cannot be either flushed or evicted until it is unprotected.
+ * The following fields are used to implement the protected list (pl).
+ *
+ * pl_len: Number of entries currently residing on the protected list.
+ *
+ * pl_size: Number of bytes of cache entries currently residing on the
+ * protected list.
+ *
+ * pl_head_ptr: Pointer to the head of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * For very frequently used entries, the protect/unprotect overhead can
+ * become burdensome. To avoid this overhead, I have modified the cache
+ * to allow entries to be "pinned". A pinned entry is similar to a
+ * protected entry, in the sense that it cannot be evicted, and that
+ * the entry can be modified at any time.
+ *
+ * Pinning an entry has the following implications:
+ *
+ * 1) A pinned entry cannot be evicted. Thus unprotected
+ * pinned entries reside in the pinned entry list, instead
+ * of the LRU list(s) (or other lists maintained by the current
+ * replacement policy code).
+ *
+ * 2) A pinned entry can be accessed or modified at any time.
+ * Therefore, the cache must check with the entry owner
+ * before flushing it. If permission is denied, the
+ * cache just skips the entry in the flush.
+ *
+ * 3) A pinned entry can be marked as dirty (and possibly
+ * change size) while it is unprotected.
+ *
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
+ * flush.
+ *
+ * Since pinned entries cannot be evicted, they must be kept on a pinned
+ * entry list, instead of being entrusted to the replacement policy code.
+ *
+ * Maintaining the pinned entry list requires the following fields:
+ *
+ * pel_len: Number of entries currently residing on the pinned
+ * entry list.
+ *
+ * pel_size: Number of bytes of cache entries currently residing on
+ * the pinned entry list.
+ *
+ * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * The cache must have a replacement policy, and the fields supporting this
+ * policy must be accessible from this structure.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU policy for the first cut.
+ *
+ * To further simplify matters, I have simply included the fields needed
+ * by the modified LRU in this structure. When and if we add support for
+ * other policies, it will probably be easiest to just add the necessary
+ * fields to this structure as well -- we only create one instance of this
+ * structure per file, so the overhead is not excessive.
+ *
+ *
+ * Fields supporting the modified LRU policy:
+ *
+ * See most any OS text for a discussion of the LRU replacement policy.
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list.
+ *
+ * The clean LRU list is simply the regular LRU list with all dirty cache
+ * entries removed.
+ *
+ * Similarly, the dirty LRU list is the regular LRU list with all the clean
+ * cache entries removed.
+ *
+ * When reading in parallel mode, we evict from the clean LRU list only.
+ * This implies that we must try to ensure that the clean LRU list is
+ * reasonably well stocked at all times.
+ *
+ * We attempt to do this by trying to flush enough entries on each write
+ * to keep the cLRU_list_size >= min_clean_size.
+ *
+ * Even if we start with a completely clean cache, a sequence of protects
+ * without unprotects can empty the clean LRU list. In this case, the
+ * cache must grow temporarily. At the next write, we will attempt to
+ * evict enough entries to reduce index_size to less than max_cache_size.
+ * While this will usually be possible, all bets are off if enough entries
+ * are protected.
+ *
+ * Discussions of the individual fields used by the modified LRU replacement
+ * policy follow:
+ *
+ * LRU_list_len: Number of cache entries currently on the LRU list.
+ *
+ * Observe that LRU_list_len + pl_len must always equal
+ * index_len.
+ *
+ * LRU_list_size: Number of bytes of cache entries currently residing on the
+ * LRU list.
+ *
+ * Observe that LRU_list_size + pl_size must always equal
+ * index_size.
+ *
+ * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_list_len: Number of cache entries currently on the clean LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * cLRU_list_size: Number of bytes of cache entries currently residing on
+ * the clean LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * dLRU_list_size: Number of cache entries currently on the dirty LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * Automatic cache size adjustment:
+ *
+ * While the default cache size is adequate for most cases, we can run into
+ * cases where the default is too small. Ideally, we will let the user
+ * adjust the cache size as required. However, this is not possible in all
+ * cases. Thus I have added automatic cache size adjustment code.
+ *
+ * The configuration for the automatic cache size adjustment is stored in
+ * the structure described below:
+ *
+ * size_increase_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to increase the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * size_decrease_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to decrease the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * cache_full: Boolean flag used to keep track of whether the cache is
+ * full, so we can refrain from increasing the size of a
+ *		cache which hasn't used up the space allotted to it.
+ *
+ * The field is initialized to FALSE, and then set to TRUE
+ * whenever we attempt to make space in the cache.
+ *
+ * resize_enabled: This is another convenience flag which is set whenever
+ * a new set of values for resize_ctl are provided. Very
+ * simply,
+ *
+ * resize_enabled = size_increase_possible ||
+ * size_decrease_possible;
+ *
+ * size_decreased:  Boolean flag set to TRUE whenever the maximum cache
+ * size is decreased. The flag triggers a call to
+ * H5C2_make_space_in_cache() on the next call to H5C2_protect().
+ *
+ * resize_ctl: Instance of H5C2_auto_size_ctl_t containing configuration
+ * data for automatic cache resizing.
+ *
+ * epoch_markers_active: Integer field containing the number of epoch
+ * markers currently in use in the LRU list. This value
+ * must be in the range [0, H5C2__MAX_EPOCH_MARKERS - 1].
+ *
+ * epoch_marker_active: Array of boolean of length H5C2__MAX_EPOCH_MARKERS.
+ * This array is used to track which epoch markers are currently
+ * in use.
+ *
+ * epoch_marker_ringbuf: Array of int of length H5C2__MAX_EPOCH_MARKERS + 1.
+ *
+ * To manage the epoch marker cache entries, it is necessary
+ * to track their order in the LRU list. This is done with
+ * epoch_marker_ringbuf. When markers are inserted at the
+ * head of the LRU list, the index of the marker in the
+ * epoch_markers array is inserted at the tail of the ring
+ * buffer. When it becomes the epoch_marker_active'th marker
+ * in the LRU list, it will have worked its way to the head
+ * of the ring buffer as well. This allows us to remove it
+ * without scanning the LRU list if such is required.
+ *
+ * epoch_marker_ringbuf_first: Integer field containing the index of the
+ * first entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_last: Integer field containing the index of the
+ * last entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_size: Integer field containing the number of entries
+ * in the ring buffer.
+ *
+ * epoch_markers: Array of instances of H5C2_cache_entry_t of length
+ * H5C2__MAX_EPOCH_MARKERS. The entries are used as markers
+ * in the LRU list to identify cache entries that haven't
+ * been accessed for some (small) specified number of
+ * epochs. These entries (if any) can then be evicted and
+ * the cache size reduced -- ideally without evicting any
+ * of the current working set. Needless to say, the epoch
+ * length and the number of epochs before an unused entry
+ * must be chosen so that all, or almost all, the working
+ * set will be accessed before the limit.
+ *
+ * Epoch markers only appear in the LRU list, never in
+ * the index or slist. While they are of type
+ * H5C2__EPOCH_MARKER_TYPE, and have associated class
+ * functions, these functions should never be called.
+ *
+ * The addr fields of these instances of H5C2_cache_entry_t
+ * are set to the index of the instance in the epoch_markers
+ * array, the size is set to 0, and the type field points
+ * to the constant structure epoch_marker_class defined
+ * in H5C2.c. The next and prev fields are used as usual
+ * to link the entry into the LRU list.
+ *
+ * All other fields are unused.
+ *
+ *
+ * Cache hit rate collection fields:
+ *
+ * We supply the current cache hit rate on request, so we must keep a
+ * simple cache hit rate computation regardless of whether statistics
+ * collection is enabled. The following fields support this capability.
+ *
+ * cache_hits: Number of cache hits since the last time the cache hit
+ * rate statistics were reset. Note that when automatic cache
+ * re-sizing is enabled, this field will be reset every automatic
+ * resize epoch.
+ *
+ * cache_accesses: Number of times the cache has been accessed
+ *		since the last time the cache hit rate statistics
+ * were reset. Note that when automatic cache re-sizing is enabled,
+ * this field will be reset every automatic resize epoch.
+ *
+ *
+ * Statistics collection fields:
+ *
+ * When enabled, these fields are used to collect statistics as described
+ * below. The first set are collected only when H5C2_COLLECT_CACHE_STATS
+ * is true.
+ *
+ * hits: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been in cache when requested in
+ * the current epoch.
+ *
+ * misses: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has not been in cache when
+ * requested in the current epoch.
+ *
+ * write_protects: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been write protected
+ * in the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * read_protects: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been read protected in
+ * the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * max_read_protects: Array of int32 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ *		The cells are used to record the maximum number of simultaneous read
+ * protects on any entry with type id equal to the array index
+ * in the current epoch.
+ *
+ * insertions: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been inserted into the
+ * cache in the current epoch.
+ *
+ * pinned_insertions: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been inserted
+ * pinned into the cache in the current epoch.
+ *
+ * clears: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been cleared in the current
+ * epoch.
+ *
+ * flushes: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been written to disk in the
+ * current epoch.
+ *
+ * evictions: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been evicted from the cache in
+ * the current epoch.
+ *
+ * renames: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been renamed in the current
+ * epoch.
+ *
+ * entry_flush_renames: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been renamed
+ * during its flush callback in the current epoch.
+ *
+ * cache_flush_renames: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been renamed
+ * during a cache flush in the current epoch.
+ *
+ * pins: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been pinned in the current
+ * epoch.
+ *
+ * unpins: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been unpinned in the current
+ * epoch.
+ *
+ * dirty_pins: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been marked dirty while pinned
+ * in the current epoch.
+ *
+ * pinned_flushes: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been flushed while
+ * pinned in the current epoch.
+ *
+ * pinned_cleared: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
+ * pinned in the current epoch.
+ *
+ * size_increases: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has increased in
+ * size in the current epoch.
+ *
+ * size_decreases: Array of int64 of length H5C2__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has decreased in
+ * size in the current epoch.
+ *
+ * entry_flush_size_changes: Array of int64 of length
+ * H5C2__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size while in its flush callback.
+ *
+ * cache_flush_size_changes: Array of int64 of length
+ * H5C2__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size during a cache flush
+ *
+ * total_ht_insertions: Number of times entries have been inserted into the
+ * hash table in the current epoch.
+ *
+ * total_ht_deletions: Number of times entries have been deleted from the
+ * hash table in the current epoch.
+ *
+ * successful_ht_searches: int64 containing the total number of successful
+ * searches of the hash table in the current epoch.
+ *
+ * total_successful_ht_search_depth: int64 containing the total number of
+ * entries other than the targets examined in successful
+ * searches of the hash table in the current epoch.
+ *
+ * failed_ht_searches: int64 containing the total number of unsuccessful
+ * searches of the hash table in the current epoch.
+ *
+ * total_failed_ht_search_depth: int64 containing the total number of
+ * entries examined in unsuccessful searches of the hash
+ * table in the current epoch.
+ *
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
+ *
+ * max_slist_len: Largest value attained by the slist_len field in the
+ * current epoch.
+ *
+ * max_slist_size: Largest value attained by the slist_size field in the
+ * current epoch.
+ *
+ * max_pl_len: Largest value attained by the pl_len field in the
+ * current epoch.
+ *
+ * max_pl_size: Largest value attained by the pl_size field in the
+ * current epoch.
+ *
+ * max_pel_len: Largest value attained by the pel_len field in the
+ * current epoch.
+ *
+ * max_pel_size: Largest value attained by the pel_size field in the
+ * current epoch.
+ *
+ * The remaining stats are collected only when both H5C2_COLLECT_CACHE_STATS
+ * and H5C2_COLLECT_CACHE_ENTRY_STATS are true.
+ *
+ * max_accesses: Array of int32 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * min_accesses: Array of int32 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the minimum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * max_clears: Array of int32 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been cleared
+ * in the current epoch.
+ *
+ * max_flushes: Array of int32 of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * flushed in the current epoch.
+ *
+ * max_size: Array of size_t of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum size of any single entry
+ * with type id equal to the array index that has resided in
+ * the cache in the current epoch.
+ *
+ * max_pins: Array of size_t of length H5C2__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index that has been
+ * marked as pinned in the cache in the current epoch.
+ *
+ *
+ * Fields supporting testing:
+ *
+ * For test purposes, it is useful to turn off some asserts and sanity
+ * checks. The following flags support this.
+ *
+ * skip_file_checks: Boolean flag used to skip sanity checks on file
+ * parameters passed to the cache. In the test bed, there
+ * is no reason to have a file open, as the cache proper
+ * just passes these parameters through without using them.
+ *
+ * When this flag is set, all sanity checks on the file
+ * parameters are skipped. The field defaults to FALSE.
+ *
+ * skip_dxpl_id_checks: Boolean flag used to skip sanity checks on the
+ * dxpl_id parameters passed to the cache. These are not
+ * used directly by the cache, so skipping the checks
+ * simplifies the test bed.
+ *
+ * When this flag is set, all sanity checks on the dxpl_id
+ * parameters are skipped. The field defaults to FALSE.
+ *
+ * prefix Array of char used to prefix debugging output. The
+ *		field is intended to allow marking of output with
+ *		the process's mpi rank.
+ *
+ ****************************************************************************/
+
+#define H5C2__H5C2_T_MAGIC 0x005CAC0F
+#define H5C2__MAX_NUM_TYPE_IDS 16
+#define H5C2__PREFIX_LEN 32
+
+struct H5C2_t
+{
+ uint32_t magic;
+
+ H5F_t * f;
+
+ hbool_t flush_in_progress;
+
+ FILE * trace_file_ptr;
+
+ void * aux_ptr;
+
+ int32_t max_type_id;
+ const char * (* type_name_table_ptr);
+
+ size_t max_cache_size;
+ size_t min_clean_size;
+
+ H5C2_write_permitted_func_t check_write_permitted;
+ hbool_t write_permitted;
+
+ H5C2_log_flush_func_t log_flush;
+
+ hbool_t evictions_enabled;
+
+ int32_t index_len;
+ size_t index_size;
+ H5C2_cache_entry_t * (index[H5C2__HASH_TABLE_LEN]);
+
+
+ int32_t slist_len;
+ size_t slist_size;
+ H5SL_t * slist_ptr;
+#if H5C2_DO_SANITY_CHECKS
+ int64_t slist_len_increase;
+ int64_t slist_size_increase;
+#endif /* H5C2_DO_SANITY_CHECKS */
+
+ int32_t pl_len;
+ size_t pl_size;
+ H5C2_cache_entry_t * pl_head_ptr;
+ H5C2_cache_entry_t * pl_tail_ptr;
+
+ int32_t pel_len;
+ size_t pel_size;
+ H5C2_cache_entry_t * pel_head_ptr;
+ H5C2_cache_entry_t * pel_tail_ptr;
+
+ int32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C2_cache_entry_t * LRU_head_ptr;
+ H5C2_cache_entry_t * LRU_tail_ptr;
+
+ int32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C2_cache_entry_t * cLRU_head_ptr;
+ H5C2_cache_entry_t * cLRU_tail_ptr;
+
+ int32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C2_cache_entry_t * dLRU_head_ptr;
+ H5C2_cache_entry_t * dLRU_tail_ptr;
+
+ hbool_t size_increase_possible;
+ hbool_t size_decrease_possible;
+ hbool_t resize_enabled;
+ hbool_t cache_full;
+ hbool_t size_decreased;
+ H5C2_auto_size_ctl_t resize_ctl;
+
+ int32_t epoch_markers_active;
+ hbool_t epoch_marker_active[H5C2__MAX_EPOCH_MARKERS];
+ int32_t epoch_marker_ringbuf[H5C2__MAX_EPOCH_MARKERS+1];
+ int32_t epoch_marker_ringbuf_first;
+ int32_t epoch_marker_ringbuf_last;
+ int32_t epoch_marker_ringbuf_size;
+ H5C2_cache_entry_t epoch_markers[H5C2__MAX_EPOCH_MARKERS];
+
+ int64_t cache_hits;
+ int64_t cache_accesses;
+
+#if H5C2_COLLECT_CACHE_STATS
+
+ /* stats fields */
+ int64_t hits[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t misses[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t write_protects[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t read_protects[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_read_protects[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t insertions[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_insertions[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t clears[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t flushes[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t evictions[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t renames[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_renames[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_renames[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t pins[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t unpins[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t dirty_pins[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_flushes[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_clears[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_increases[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_decreases[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_size_changes
+ [H5C2__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_size_changes
+ [H5C2__MAX_NUM_TYPE_IDS + 1];
+
+ int64_t total_ht_insertions;
+ int64_t total_ht_deletions;
+ int64_t successful_ht_searches;
+ int64_t total_successful_ht_search_depth;
+ int64_t failed_ht_searches;
+ int64_t total_failed_ht_search_depth;
+
+ int32_t max_index_len;
+ size_t max_index_size;
+
+ int32_t max_slist_len;
+ size_t max_slist_size;
+
+ int32_t max_pl_len;
+ size_t max_pl_size;
+
+ int32_t max_pel_len;
+ size_t max_pel_size;
+
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+
+ int32_t max_accesses[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int32_t min_accesses[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_clears[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_flushes[H5C2__MAX_NUM_TYPE_IDS + 1];
+ size_t max_size[H5C2__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_pins[H5C2__MAX_NUM_TYPE_IDS + 1];
+
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+ hbool_t skip_file_checks;
+ hbool_t skip_dxpl_id_checks;
+ char prefix[H5C2__PREFIX_LEN];
+};
+
+#endif /* _H5C2pkg_H */
+
diff --git a/src/H5C2private.h b/src/H5C2private.h
new file mode 100644
index 0000000..a949f6d
--- /dev/null
+++ b/src/H5C2private.h
@@ -0,0 +1,1350 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5C2private.h
+ * 6/3/04
+ * John Mainzer
+ *
+ * Purpose: Constants and typedefs available to the rest of the
+ * library.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef _H5C2private_H
+#define _H5C2private_H
+
+#include "H5C2public.h" /* public prototypes */
+
+/* Private headers needed by this header */
+#include "H5private.h" /* Generic Functions */
+#include "H5Fprivate.h" /* File access */
+
+
+#define H5C2_DO_SANITY_CHECKS 1
+#define H5C2_DO_EXTREME_SANITY_CHECKS 0
+
+/* This sanity checking constant was picked out of the air. Increase
+ * or decrease it if appropriate. Its purposes is to detect corrupt
+ * object sizes, so it probably doesn't matter if it is a bit big.
+ *
+ * JRM - 5/17/04
+ */
+#define H5C2_MAX_ENTRY_SIZE ((size_t)(10 * 1024 * 1024))
+
+/* H5C2_COLLECT_CACHE_STATS controls overall collection of statistics
+ * on cache activity. In general, this #define should be set to 0.
+ */
+#define H5C2_COLLECT_CACHE_STATS 1
+
+/* H5C2_COLLECT_CACHE_ENTRY_STATS controls collection of statistics
+ * in individual cache entries.
+ *
+ * H5C2_COLLECT_CACHE_ENTRY_STATS should only be defined to true if
+ * H5C2_COLLECT_CACHE_STATS is also defined to true.
+ */
+#if H5C2_COLLECT_CACHE_STATS
+
+#define H5C2_COLLECT_CACHE_ENTRY_STATS 1
+
+#else
+
+#define H5C2_COLLECT_CACHE_ENTRY_STATS 0
+
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+
+#ifdef H5_HAVE_PARALLEL
+
+/* we must maintain the clean and dirty LRU lists when we are compiled
+ * with parallel support.
+ */
+#define H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 1
+
+#else /* H5_HAVE_PARALLEL */
+
+/* The clean and dirty LRU lists don't buy us anything here -- we may
+ * want them on for testing on occasion, but in general they should be
+ * off.
+ */
+#define H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 0
+
+#endif /* H5_HAVE_PARALLEL */
+
+
+/* Typedef for the main structure for the cache (defined in H5C2pkg.h) */
+
+typedef struct H5C2_t H5C2_t;
+
+
+/***************************************************************************
+ *
+ * Struct H5C2_class_t
+ *
+ * Instances of H5C2_class_t are used to specify the callback functions
+ * used by the metadata cache for each class of metadata cache entry.
+ * The fields of the structure are discussed below:
+ *
+ * id: Integer field containing the unique ID of the class of metadata
+ * cache entries.
+ *
+ * name: Pointer to a string containing the name of the class of metadata
+ * cache entries.
+ *
+ * mem_type: Instance of H5FD_mem_t, that is used to supply the
+ * mem type passed into H5F_block_read().
+ *
+ * deserialize: Pointer to the deserialize function.
+ *
+ * This function must be able to read an on disk image of a metadata
+ * cache entry, allocate and load the equivalent in core representation,
+ * and return a pointer to that representation.
+ *
+ * The typedef for the deserialize callback is as follows:
+ *
+ * typedef void *(*H5C_deserialize_func_t)(haddr_t addr,
+ * size_t len,
+ * const void * image_ptr,
+ * void * udata_ptr,
+ * boolean * dirty_ptr);
+ *
+ * The parameters of the deserialize callback are as follows:
+ *
+ * addr: Base address in file of the image to be deserialized.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * len: Length in bytes of the in file image to be deserialized.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * image_ptr: Pointer to a buffer of length len containing the
+ * contents of the file starting at addr and continuing
+ * for len bytes.
+ *
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * must be passed through to the deserialize callback.
+ *
+ * dirty_ptr: Pointer to boolean which the deserialize function
+ * must use to mark the entry dirty if it has to modify
+ * the entry to clean up file corruption left over from
+ * an old bug in the HDF5 library.
+ *
+ * Processing in the deserialize function should proceed as follows:
+ *
+ * If the image contains valid data, and is of the correct length,
+ * the deserialize function must allocate space for an in core
+ *	representation of that data, load the contents of the image into
+ * the space allocated for the in core representation, and return
+ * a pointer to the in core representation. Observe that an
+ * instance of H5C2_cache_entry_t must be the first item in this
+ * representation. It will have to be initialized appropriately
+ * after the callback returns.
+ *
+ * Note that the structure of the in core representation is otherwise
+ * up to the cache client. All that is required is that the pointer
+ * returned be sufficient for the clients purposes when it is returned
+ * on a protect call.
+ *
+ * If the deserialize function has to clean up file corruption
+ * left over from an old bug in the HDF5 library, it must set
+ * *dirty_ptr to TRUE. If it doesn't, no action is needed as
+ * *dirty_ptr will be set to FALSE before the deserialize call.
+ *
+ * If the operation fails for any reason (i.e. bad data in buffer, bad
+ * buffer length, malloc failure, etc.) the function must return NULL and
+ * push error information on the error stack with the error API routines.
+ *
+ * If the protect call which occasioned the call to the deserialize
+ * callback had the check length flag set, after the deserialize call
+ * returns, the cache must call the image_len callback (see below) and
+ * update its on disk image length accordingly.
+ *
+ *
+ * image_len: Pointer to the image length callback.
+ *
+ * In the best of all possible worlds, we would not have this callback.
+ * It exists to allow clients to reduce the size of the on disk image of
+ * an entry in the deserialize callback.
+ *
+ * The typedef for the image_len callback is as follows:
+ *
+ * typedef herr_t (*H5C_image_len_func_t)(void *thing,
+ * size_t *image_len_ptr);
+ *
+ * The parameters of the image_len callback are as follows:
+ *
+ * thing: Pointer to the in core representation of the entry.
+ *
+ * image_len_ptr: Pointer to size_t in which the callback will return
+ * the length of the on disk image of the cache entry.
+ *
+ * Processing in the image_len function should proceed as follows:
+ *
+ * If successful, the function will place the length of the on disk
+ * image associated with the in core representation provided in the
+ * thing parameter in *image_len_ptr, and then return SUCCEED.
+ *
+ * On failure, the function must return FAIL and push error information
+ * onto the error stack with the error API routines.
+ *
+ *
+ * serialize: Pointer to the serialize callback.
+ *
+ * The serialize callback is invoked by the metadata cache whenever
+ * it needs a current on disk image of the metadata entry for purposes
+ * either constructing a journal or flushing the entry to disk.
+ *
+ * At this point, one would think that the base address and length of
+ *	the entry's image on disk would be well known.
+ * However, that need not be the case as fractal heap blocks can
+ *	change size (and therefore possibly location as well) on
+ * serialization if compression is enabled. In the old H5C code,
+ * this happened on a flush, and occasioned a rename in the midst
+ * of the flush. To avoid this in H5C2, the serialize callback
+ * will return the new base address, length, and image pointer to
+ * the caller when necessary. The caller must then update the
+ * metadata cache's internal structures accordingly.
+ *
+ * The typedef for the serialize callback is as follows:
+ *
+ * typedef herr_t (*H5C_serialize_func_t)(haddr_t addr,
+ * size_t len,
+ * void * image_ptr,
+ * void * thing,
+ * unsigned * flags_ptr,
+ * haddr_t * new_addr_ptr,
+ * size_t * new_len_ptr,
+ * void ** new_image_ptr_ptr);
+ *
+ * The parameters of the serialize callback are as follows:
+ *
+ * addr: Base address in file of the entry to be serialized.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * len: Length in bytes of the in file image of the entry to be
+ * serialized. Also the size of *image_ptr (below).
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * image_ptr: Pointer to a buffer of length len bytes into which a
+ * serialized image of the target metadata cache entry is
+ * to be written.
+ *
+ * Note that this buffer will not in general be initialized
+ * to any particular value. Thus the serialize function may
+ * not assume any initial value and must set each byte in
+ * the buffer.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry.
+ * This is the same pointer returned by a protect of the
+ * addr and len given above.
+ *
+ * flags_ptr: Pointer to an unsigned integer used to return flags
+ * indicating whether the resize function resized or renamed
+ *		the entry.  If the entry was neither resized nor renamed,
+ * the serialize function must set *flags_ptr to zero.
+ * H5C2__SERIALIZE_RESIZED_FLAG and H5C2__SERIALIZE_RENAMED_FLAG
+ * must be set to indicate a resize and a rename respectively.
+ *
+ * If the H5C2__SERIALIZE_RESIZED_FLAG is set, the new length
+ * and image pointer must be stored in *new_len_ptr and
+ * *new_image_ptr_ptr respectively.
+ *
+ * If the H5C2__SERIALIZE_RENAMED_FLAG flag is also set, the
+ * new image base address must be stored in *new_addr_ptr.
+ * Observe that the H5C2__SERIALIZE_RENAMED_FLAG must not
+ * appear without the H5C2__SERIALIZE_RESIZED_FLAG.
+ *
+ * Except as noted above, the locations pointed to by the
+ * remaining parameters are undefined, and should be ignored
+ * by the caller.
+ *
+ * new_addr_ptr: Pointer to haddr_t. If the entry is renamed by
+ * the serialize function, the new on disk base address must
+ * be stored in *new_addr_ptr. If the entry is not renamed
+ * by the serialize function, *new_addr_ptr is undefined.
+ *
+ * new_len_ptr: Pointer to size_t. If the entry is resized by the
+ * serialize function, the new length of the on disk image
+ * must be stored in *new_len_ptr. If the entry is not
+ * resized by the serialize function, *new_len_ptr is
+ * undefined.
+ *
+ * new_image_ptr_ptr: Pointer to pointer to void. If the entry is
+ * resized by the serialize function, the pointer to the
+ * new buffer containing the on disk image must be stored
+ * in *new_image_ptr_ptr. If the entry is not resized by
+ * the serialize function, *new_image_ptr_ptr is undefined.
+ *
+ * Processing in the serialize function should proceed as follows:
+ *
+ * The serialize function must examine the in core representation
+ * indicated by the thing parameter, and write a serialized image
+ * of its contents into the provided buffer.
+ *
+ * If the serialize function does not change the size or location
+ * of the on disk image, it must set *flags_ptr to zero.
+ *
+ * If the size of the on disk image must be changed, the serialize
+ * function must free the old image buffer (base address in image_ptr),
+ * allocate a new one, load the image into the new buffer, load the
+ * base address of the new buffer into *new_image_ptr_ptr, load the
+ * length of the new image into *new_len_ptr, and set the
+ * H5C2__SERIALIZE_RESIZED_FLAG in *flags_ptr.
+ *
+ * If in addition, the base address of the on disk image must
+ * be changed, the serialize function must also set *new_addr_ptr
+ * to the new base address, and set the H5C2__SERIALIZE_RENAMED_FLAG
+ * in *flags_ptr.
+ *
+ * If it is successful, the function must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
+ *
+ * free_icr: Pointer to the free ICR Callback.
+ *
+ * The free ICR callback is invoked by the metadata cache when it
+ * wishes to evict an entry, and needs the client to free the memory
+ * allocated for the in core representation.
+ *
+ * The typedef for the free ICR callback is as follows:
+ *
+ * typedef herr_t (*H5C2_free_icr_func_t)(haddr_t addr,
+ * size_t len,
+ * void * thing);
+ *
+ * The parameters of the free ICR callback are as follows:
+ *
+ * addr: Base address in file of the entry being evicted.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * len: Length of the in file image of the entry being evicted
+ * in bytes.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect
+ * of the addr and len above.
+ *
+ * Processing in the free ICR function should proceed as follows:
+ *
+ * The free ICR function must free all memory allocated to the
+ * in core representation.
+ *
+ * If the function is successful, it must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
+ * At least when compiled with debug, it would be useful if the
+ * free ICR call would fail if the in core representation has been
+ * modified since the last serialize or clear callback.
+ *
+ *
+ * clear_dirty_bits: Pointer to the clear dirty bits callback.
+ *
+ * For sanity checking purposes, it will be useful if cache clients
+ * track whether an in core representation has been modified since
+ * the last time it was serialized. This data is used to flag an
+ * error if the cache attempts to free an in core representation
+ * that has not been serialized since the last time it was modified.
+ *
+ * If this happens, either the client forgot to tell the cache that
+ * an entry is dirty, or the cache forgot to flush a dirty entry
+ * before evicting it. In either case we want to know before we
+ * get file corruption complaints.
+ *
+ * However, in some cases, we want to mark an entry as clean even
+ * though it has not been flushed to disk -- most particularly in
+ * the parallel case. Thus we need some way to tell the client
+ * that a free of the associated ICR is OK even though it has
+ * been modified since the last serialization. Hence the clear
+ * dirty bits callback.
+ *
+ * Since the clear dirty bits callback is purely for sanity checking,
+ * it is called only when we compile with debug.
+ *
+ * The typedef for the clear callback is as follows:
+ *
+ * typedef herr_t (*H5C2_clear_dirty_bits_func_t)(haddr_t addr,
+ * size_t len,
+ * void * thing);
+ *
+ * The parameters of the clear callback are as follows:
+ *
+ * addr: Base address in file of the entry whose dirty bits
+ * are being cleared
+ *
+ * len: Length in bytes of the in file image of the entry
+ * whose dirty bits are being cleared.
+ *
+ * thing: Pointer to void containing the address of the in
+ * core representation of the target metadata cache entry.
+ * This is the same pointer that would be returned by a
+ * protect of the addr and len above.
+ *
+ * Processing in the clear callback function should proceed as follows:
+ *
+ * The function must clear any dirty bits associated with the ICR.
+ *
+ * If successful, the function must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
+ ***************************************************************************/
+/* Deserialize callback: construct the in core representation of an
+ * entry from its on disk image.  Returns a pointer to the in core
+ * representation, or NULL on failure.  See the callback documentation
+ * in the header comment above for the full contract.
+ */
+typedef void *(*H5C2_deserialize_func_t)(haddr_t addr,
+ size_t len,
+ const void * image_ptr,
+ const void * udata_ptr,
+ hbool_t * dirty_ptr);
+
+/* Image length callback: report the size in bytes of the on disk
+ * image of *thing in *image_len_ptr.
+ */
+typedef herr_t (*H5C2_image_len_func_t)(void *thing,
+ size_t *image_len_ptr);
+
+/* Values for the *flags_ptr out parameter of the serialize callback:
+ * RESIZED if the on disk image changed size, RENAMED if in addition
+ * its on disk base address changed.
+ */
+#define H5C2__SERIALIZE_RESIZED_FLAG 0x1
+#define H5C2__SERIALIZE_RENAMED_FLAG 0x2
+
+/* Serialize callback: write the on disk image of *thing into the
+ * buffer at image_ptr.  If the entry is resized or renamed in the
+ * process, the new_*_ptr out parameters and *flags_ptr must be set
+ * accordingly -- see the discussion in the header comment above.
+ */
+typedef herr_t (*H5C2_serialize_func_t)(haddr_t addr,
+ size_t len,
+ void * image_ptr,
+ void * thing,
+ unsigned * flags_ptr,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+
+/* Free ICR callback: release all memory allocated to the in core
+ * representation of the entry being evicted.
+ */
+typedef herr_t (*H5C2_free_icr_func_t)(haddr_t addr,
+ size_t len,
+ void * thing);
+
+/* Clear dirty bits callback: reset the client maintained dirty bits
+ * on *thing without flushing it.  Used for sanity checking only, and
+ * thus called only in debug builds (see header comment above).
+ */
+typedef herr_t (*H5C2_clear_dirty_bits_func_t)(haddr_t addr,
+ size_t len,
+ void * thing);
+
+/* Class descriptor for one type of metadata cache entry: identifies
+ * the client type and supplies the callbacks documented in the header
+ * comment above.
+ */
+typedef struct H5C2_class_t {
+ int id; /* class ID */
+ const char * name; /* class name (for reporting) */
+ H5FD_mem_t mem_type; /* associated file memory type */
+ H5C2_deserialize_func_t deserialize; /* on disk -> in core */
+ H5C2_image_len_func_t image_len; /* on disk image size query */
+ H5C2_serialize_func_t serialize; /* in core -> on disk */
+ H5C2_free_icr_func_t free_icr; /* discard in core representation */
+ H5C2_clear_dirty_bits_func_t clear_dirty_bits; /* sanity checks only */
+} H5C2_class_t;
+
+
+/* Type definitions of callback functions used by the cache as a whole */
+
+/* Callback used by the cache to ask whether a write to file is
+ * currently permitted; the answer is returned in *write_permitted_ptr.
+ */
+typedef herr_t (*H5C2_write_permitted_func_t)(const H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t * write_permitted_ptr);
+
+/* Callback invoked to log the flush of an entry -- receives the
+ * entry's address, whether it was dirty, the flush flags, and its
+ * client type ID.
+ */
+typedef herr_t (*H5C2_log_flush_func_t)(H5C2_t * cache_ptr,
+ haddr_t addr,
+ hbool_t was_dirty,
+ unsigned flags,
+ int type_id);
+
+/* Upper and lower limits on cache size. These limits are picked
+ * out of a hat -- you should be able to change them as necessary.
+ *
+ * However, if you need a very big cache, you should also increase the
+ * size of the hash table (H5C2__HASH_TABLE_LEN in H5C2pkg.h). The current
+ * upper bound on cache size is rather large for the current hash table
+ * size.
+ */
+
+#define H5C2__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024))
+#define H5C2__MIN_MAX_CACHE_SIZE ((size_t)(1024))
+
+
+/* Default max cache size and min clean size are given here to make
+ * them generally accessible.
+ */
+
+#define H5C2__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
+#define H5C2__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
+
+
+/****************************************************************************
+ *
+ * structure H5C2_cache_entry_t
+ *
+ * Instances of the H5C2_cache_entry_t structure are used to store cache
+ * entries in a hash table and sometimes in a skip list.
+ * See H5SL.c for the particulars of the skip list.
+ *
+ * In typical application, this structure is the first field in a
+ * structure to be cached. For historical reasons, the external module
+ * is responsible for managing the is_dirty field (this is no longer
+ * completely true. See the comment on the is_dirty field for details).
+ * All other fields are managed by the cache.
+ *
+ * The fields of this structure are discussed individually below:
+ *
+ * JRM - 4/26/04
+ *
+ * magic: Unsigned 32 bit integer that must always be set to
+ * H5C2__H5C2_CACHE_ENTRY_T_MAGIC when the entry is valid.
+ * The field must be set to H5C2__H5C2_CACHE_ENTRY_T_BAD_MAGIC
+ * just before the entry is freed.
+ *
+ * This is necessary, as the LRU list can be changed out
+ * from under H5C2_make_space_in_cache() by the serialize
+ * callback which may change the size of an existing entry,
+ * and/or load a new entry while serializing the target entry.
+ *
+ * This in turn can cause a recursive call to
+ * H5C2_make_space_in_cache() which may either flush or evict
+ * the next entry that the first invocation of that function
+ * was about to examine.
+ *
+ * The magic field allows H5C2_make_space_in_cache() to
+ * detect this case, and re-start its scan from the bottom
+ * of the LRU when this situation occurs.
+ *
+ * addr: Base address of the cache entry on disk.
+ *
+ * size: Length of the cache entry on disk. Note that unlike normal
+ * caches, the entries in this cache are of variable length.
+ * The entries should never overlap, and when we do writebacks,
+ * we will want to writeback adjacent entries where possible.
+ *
+ * NB: At present, entries need not be contiguous on disk. Until
+ * we fix this, we can't do much with writing back adjacent
+ * entries.
+ *
+ * Update: This has now been changed -- all metadata cache
+ * entries must now be associated with a single contiguous
+ * block of memory on disk. The image of this block (i.e.
+ * the on disk image) is stored in *image_ptr (discussed below).
+ *
+ * image_ptr: Pointer to void. When not NULL, this field points to a
+ * dynamically allocated block of size bytes in which the
+ * on disk image of the metadata cache entry is stored.
+ *
+ * If the entry is dirty, the serialize callback must be used
+ * to update this image before it is written to disk
+ *
+ * type: Pointer to the instance of H5C2_class_t containing pointers
+ * to the methods for cache entries of the current type. This
+ * field should be NULL when the instance of H5C2_cache_entry_t
+ * is not in use.
+ *
+ * The name is not particularly descriptive, but is retained
+ * to avoid changes in existing code.
+ *
+ * is_dirty: Boolean flag indicating whether the contents of the cache
+ * entry has been modified since the last time it was written
+ * to disk.
+ *
+ * NOTE: For historical reasons, this field is not maintained
+ * by the cache. Instead, the module using the cache
+ * sets this flag when it modifies the entry, and the
+ * flush and clear functions supplied by that module
+ * reset the dirty when appropriate.
+ *
+ * This is a bit quirky, so we may want to change this
+ * someday. However it will require a change in the
+ * cache interface.
+ *
+ * Update: Management of the is_dirty field has been largely
+ * moved into the cache. The only remaining exceptions
+ * are the flush and clear functions supplied by the
+ * modules using the cache. These still clear the
+ * is_dirty field as before. -- JRM 7/5/05
+ *
+ * Update: Management of the is_dirty field is now entirely
+ * in the cache. -- JRM 7/5/07
+ *
+ * dirtied: Boolean flag used to indicate that the entry has been
+ * dirtied while protected.
+ *
+ * This field is set to FALSE in the protect call, and may
+ * be set to TRUE by the
+ * H5C2_mark_pinned_or_protected_entry_dirty()
+ * call at any time prior to the unprotect call.
+ *
+ * The H5C2_mark_pinned_or_protected_entry_dirty() call exists
+ * as a convenience function for the fractal heap code which
+ * may not know if an entry is protected or pinned, but knows
+ * that it is either protected or pinned. The dirtied field was
+ * added as in the parallel case, it is necessary to know
+ * whether a protected entry was dirty prior to the protect call.
+ *
+ * is_protected: Boolean flag indicating whether this entry is protected
+ * (or locked, to use more conventional terms). When it is
+ * protected, the entry cannot be flushed or accessed until
+ * it is unprotected (or unlocked -- again to use more
+ * conventional terms).
+ *
+ * Note that protected entries are removed from the LRU lists
+ * and inserted on the protected list.
+ *
+ * is_read_only: Boolean flag that is only meaningful if is_protected is
+ * TRUE. In this circumstance, it indicates whether the
+ * entry has been protected read only, or read/write.
+ *
+ * If the entry has been protected read only (i.e. is_protected
+ * and is_read_only are both TRUE), we allow the entry to be
+ * protected more than once.
+ *
+ * In this case, the number of readers is maintained in the
+ * ro_ref_count field (see below), and unprotect calls simply
+ * decrement that field until it drops to zero, at which point
+ * the entry is actually unprotected.
+ *
+ * ro_ref_count: Integer field used to maintain a count of the number of
+ * outstanding read only protects on this entry. This field
+ * must be zero whenever either is_protected or is_read_only
+ * is FALSE.
+ *
+ * is_pinned: Boolean flag indicating whether the entry has been pinned
+ * in the cache.
+ *
+ * For very hot entries, the protect / unprotect overhead
+ * can become excessive. Thus the cache has been extended
+ * to allow an entry to be "pinned" in the cache.
+ *
+ * Pinning an entry in the cache has several implications:
+ *
+ * 1) A pinned entry cannot be evicted. Thus unprotected
+ * pinned entries must be stored in the pinned entry
+ * list, instead of being managed by the replacement
+ * policy code (LRU at present).
+ *
+ * 2) A pinned entry can be accessed or modified at any time.
+ * Therefore, the cache must check with the entry owner
+ * before flushing it. If permission is denied, the
+ * cache does not flush the entry.
+ *
+ * 3) A pinned entry can be marked as dirty (and possibly
+ * change size) while it is unprotected.
+ *
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
+ * flush.
+ *
+ * JRM -- 3/16/06
+ *
+ * in_slist: Boolean flag indicating whether the entry is in the skip list
+ * As a general rule, entries are placed in the list when they
+ * are marked dirty. However they may remain in the list after
+ * being flushed.
+ *
+ * Update: Dirty entries are now removed from the skip list
+ * when they are flushed.
+ *
+ * flush_marker: Boolean flag indicating that the entry is to be flushed
+ * the next time H5C2_flush_cache() is called with the
+ * H5AC__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when
+ * the entry is flushed for whatever reason.
+ *
+ * clear_on_unprotect: Boolean flag used only in PHDF5. When H5C2 is used
+ * to implement the metadata cache in the parallel case, only
+ * the cache with mpi rank 0 is allowed to actually write to
+ * file -- all other caches must retain dirty entries until they
+ * are advised that the entry is clean.
+ *
+ * This flag is used in the case that such an advisory is
+ * received when the entry is protected. If it is set when an
+ * entry is unprotected, and the dirtied flag is not set in
+ * the unprotect, the entry's is_dirty flag is reset by flushing
+ * it with the H5C2__FLUSH_CLEAR_ONLY_FLAG.
+ *
+ * flush_in_progress: Boolean flag that is set to true iff the entry
+ * is in the process of being flushed. This allows the cache
+ * to detect when a call is the result of a flush callback.
+ *
+ * destroy_in_progress: Boolean flag that is set to true iff the entry
+ * is in the process of being flushed and destroyed.
+ *
+ *
+ * Fields supporting the hash table:
+ *
+ * Fields in the cache are indexed by a more or less conventional hash table.
+ * If there are multiple entries in any hash bin, they are stored in a doubly
+ * linked list.
+ *
+ * ht_next: Next pointer used by the hash table to store multiple
+ * entries in a single hash bin. This field points to the
+ * next entry in the doubly linked list of entries in the
+ * hash bin, or NULL if there is no next entry.
+ *
+ * ht_prev: Prev pointer used by the hash table to store multiple
+ * entries in a single hash bin. This field points to the
+ * previous entry in the doubly linked list of entries in
+ * the hash bin, or NULL if there is no previous entry.
+ *
+ *
+ * Fields supporting replacement policies:
+ *
+ * The cache must have a replacement policy, and it will usually be
+ * necessary for this structure to contain fields supporting that policy.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU policy for the first cut.
+ *
+ * When additional replacement policies are added, the fields in this
+ * section will be used in different ways or not at all. Thus the
+ * documentation of these fields is repeated for each replacement policy.
+ *
+ * Modified LRU:
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list. When reading in
+ * parallel mode, we evict from the clean LRU list only. This implies
+ * that we must try to ensure that the clean LRU list is reasonably well
+ * stocked. See the comments on H5C2_t in H5C2pkg.h for more details.
+ *
+ * Note that even if we start with a completely clean cache, a sequence
+ * of protects without unprotects can empty the clean LRU list. In this
+ * case, the cache must grow temporarily. At the next write, we will
+ * attempt to evict enough entries to get the cache down to its nominal
+ * maximum size.
+ *
+ * The use of the replacement policy fields under the Modified LRU policy
+ * is discussed below:
+ *
+ * next: Next pointer in either the LRU or the protected list,
+ * depending on the current value of protected. If there
+ * is no next entry on the list, this field should be set
+ * to NULL.
+ *
+ * prev: Prev pointer in either the LRU or the protected list,
+ * depending on the current value of protected. If there
+ * is no previous entry on the list, this field should be
+ * set to NULL.
+ *
+ * aux_next: Next pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when protected is true. When
+ * protected is false, and dirty is true, it should point
+ * to the next item on the dirty LRU list. When protected
+ * is false, and dirty is false, it should point to the
+ * next item on the clean LRU list. In either case, when
+ * there is no next item, it should be NULL.
+ *
+ * aux_prev: Previous pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when protected is true. When
+ * protected is false, and dirty is true, it should point
+ * to the previous item on the dirty LRU list. When protected
+ * is false, and dirty is false, it should point to the
+ * previous item on the clean LRU list. In either case, when
+ * there is no previous item, it should be NULL.
+ *
+ *
+ * Cache entry stats collection fields:
+ *
+ * These fields should only be compiled in when both H5C2_COLLECT_CACHE_STATS
+ * and H5C2_COLLECT_CACHE_ENTRY_STATS are true. When present, they allow
+ * collection of statistics on individual cache entries.
+ *
+ * accesses: int32_t containing the number of times this cache entry has
+ * been referenced in its lifetime.
+ *
+ * clears: int32_t containing the number of times this cache entry has
+ * been cleared in its life time.
+ *
+ * flushes: int32_t containing the number of times this cache entry has
+ * been flushed to file in its life time.
+ *
+ * pins: int32_t containing the number of times this cache entry has
+ * been pinned in cache in its life time.
+ *
+ ****************************************************************************/
+
+#define H5C2__H5C2_CACHE_ENTRY_T_MAGIC 0x005CAC0A
+#define H5C2__H5C2_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
+
+/* See the structure header comment above for the full documentation
+ * of each field.
+ */
+typedef struct H5C2_cache_entry_t
+{
+ uint32_t magic; /* H5C2__H5C2_CACHE_ENTRY_T_MAGIC while valid */
+ haddr_t addr; /* base address of entry on disk */
+ size_t size; /* length of entry on disk */
+ void * image_ptr; /* on disk image buffer, or NULL */
+ const H5C2_class_t * type; /* class methods for this entry */
+ hbool_t is_dirty; /* modified since last written to disk? */
+ hbool_t dirtied; /* dirtied while protected? */
+ hbool_t is_protected; /* locked against flush / access? */
+ hbool_t is_read_only; /* protected read only? */
+ int ro_ref_count; /* # of outstanding read only protects */
+ hbool_t is_pinned; /* pinned in cache (not evictable)? */
+ hbool_t in_slist; /* entry in the skip list? */
+ hbool_t flush_marker; /* flush on next marked-entries flush? */
+#ifdef H5_HAVE_PARALLEL
+ hbool_t clear_on_unprotect; /* PHDF5: clear is_dirty on unprotect */
+#endif /* H5_HAVE_PARALLEL */
+ hbool_t flush_in_progress; /* entry currently being flushed? */
+ hbool_t destroy_in_progress; /* being flushed and destroyed? */
+
+ /* fields supporting the hash table: */
+
+ struct H5C2_cache_entry_t * ht_next; /* next in hash bin list */
+ struct H5C2_cache_entry_t * ht_prev; /* prev in hash bin list */
+
+ /* fields supporting replacement policies: */
+
+ struct H5C2_cache_entry_t * next; /* next on LRU / protected list */
+ struct H5C2_cache_entry_t * prev; /* prev on LRU / protected list */
+ struct H5C2_cache_entry_t * aux_next; /* next on clean/dirty LRU list */
+ struct H5C2_cache_entry_t * aux_prev; /* prev on clean/dirty LRU list */
+
+#if H5C2_COLLECT_CACHE_ENTRY_STATS
+
+ /* cache entry stats fields */
+
+ int32_t accesses; /* # of times entry referenced */
+ int32_t clears; /* # of times entry cleared */
+ int32_t flushes; /* # of times entry flushed to file */
+ int32_t pins; /* # of times entry pinned in cache */
+
+#endif /* H5C2_COLLECT_CACHE_ENTRY_STATS */
+
+} H5C2_cache_entry_t;
+
+
+/****************************************************************************
+ *
+ * structure H5C2_auto_size_ctl_t
+ *
+ * Instances of H5C2_auto_size_ctl_t are used to get and set the control
+ * fields for automatic cache re-sizing.
+ *
+ * The fields of the structure are discussed individually below:
+ *
+ * version: Integer field containing the version number of this version
+ * of the H5C2_auto_size_ctl_t structure. Any instance of
+ * H5C2_auto_size_ctl_t passed to the cache must have a known
+ * version number, or an error will be flagged.
+ *
+ * report_fcn: Pointer to the function that is to be called to report
+ * activities each time the auto cache resize code is executed. If the
+ * field is NULL, no call is made.
+ *
+ * If the field is not NULL, it must contain the address of a function
+ * of type H5C2_auto_resize_report_fcn.
+ *
+ * set_initial_size: Boolean flag indicating whether the size of the
+ * initial size of the cache is to be set to the value given in
+ * the initial_size field. If set_initial_size is FALSE, the
+ * initial_size field is ignored.
+ *
+ * initial_size: If enabled, this field contains the size the cache is
+ * to be set to upon receipt of this structure. Needless to say,
+ * initial_size must lie in the closed interval [min_size, max_size].
+ *
+ * min_clean_fraction: double in the range 0 to 1 indicating the fraction
+ * of the cache that is to be kept clean. This field is only used
+ * in parallel mode. Typical values are 0.1 to 0.5.
+ *
+ * max_size: Maximum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must
+ * be greater than or equal to min_size.
+ *
+ * min_size: Minimum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, min_size must
+ * be less than or equal to max_size.
+ *
+ * epoch_length: Number of accesses on the cache over which to collect
+ * hit rate stats before running the automatic cache resize code,
+ * if it is enabled.
+ *
+ * At the end of an epoch, we discard prior hit rate data and start
+ * collecting afresh. The epoch_length must lie in the closed
+ * interval [H5C2__MIN_AR_EPOCH_LENGTH, H5C2__MAX_AR_EPOCH_LENGTH].
+ *
+ *
+ * Cache size increase control fields:
+ *
+ * incr_mode: Instance of the H5C2_cache_incr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * increased. At present there are two possible values:
+ *
+ * H5C2_incr__off: Don't attempt to increase the size of the cache
+ * automatically.
+ *
+ * When this increment mode is selected, the remaining fields
+ * in the cache size increase section are ignored.
+ *
+ * H5C2_incr__threshold: Attempt to increase the size of the cache
+ * whenever the average hit rate over the last epoch drops
+ * below the value supplied in the lower_hr_threshold
+ * field.
+ *
+ * Note that this attempt will fail if the cache is already
+ * at its maximum size, or if the cache is not already using
+ * all available space.
+ *
+ * lower_hr_threshold: Lower hit rate threshold. If the increment mode
+ * (incr_mode) is H5C2_incr__threshold and the hit rate drops below the
+ * value supplied in this field in an epoch, increment the cache size by
+ * size_increment. Note that cache size may not be incremented above
+ * max_size, and that the increment may be further restricted by the
+ * max_increment field if it is enabled.
+ *
+ * When enabled, this field must contain a value in the range [0.0, 1.0].
+ * Depending on the incr_mode selected, it may also have to be less than
+ * upper_hr_threshold.
+ *
+ * increment: Double containing the multiplier used to derive the new
+ * cache size from the old if a cache size increment is triggered.
+ * The increment must be greater than 1.0, and should not exceed 2.0.
+ *
+ * The new cache size is obtained by multiplying the current max cache
+ * size by the increment, and then clamping to max_size and to stay
+ * within the max_increment as necessary.
+ *
+ * apply_max_increment: Boolean flag indicating whether the max_increment
+ * field should be used to limit the maximum cache size increment.
+ *
+ * max_increment: If enabled by the apply_max_increment field described
+ * above, this field contains the maximum number of bytes by which the
+ * cache size can be increased in a single re-size.
+ *
+ *
+ * Cache size decrease control fields:
+ *
+ * decr_mode: Instance of the H5C2_cache_decr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * decreased. At present there are four possibilities.
+ *
+ * H5C2_decr__off: Don't attempt to decrease the size of the cache
+ * automatically.
+ *
+ * When this increment mode is selected, the remaining fields
+ * in the cache size decrease section are ignored.
+ *
+ * H5C2_decr__threshold: Attempt to decrease the size of the cache
+ * whenever the average hit rate over the last epoch rises
+ * above the value supplied in the upper_hr_threshold
+ * field.
+ *
+ * H5C2_decr__age_out: At the end of each epoch, search the cache for
+ * entries that have not been accessed for at least the number
+ * of epochs specified in the epochs_before_eviction field, and
+ * evict these entries. Conceptually, the maximum cache size
+ * is then decreased to match the new actual cache size. However,
+ * this reduction may be modified by the min_size, the
+ * max_decrement, and/or the empty_reserve.
+ *
+ * H5C2_decr__age_out_with_threshold: Same as age_out, but we only
+ * attempt to reduce the cache size when the hit rate observed
+ * over the last epoch exceeds the value provided in the
+ * upper_hr_threshold field.
+ *
+ * upper_hr_threshold: Upper hit rate threshold. The use of this field
+ * varies according to the current decr_mode:
+ *
+ * H5C2_decr__off or H5C2_decr__age_out: The value of this field is
+ * ignored.
+ *
+ * H5C2_decr__threshold: If the hit rate exceeds this threshold in any
+ * epoch, attempt to decrement the cache size by size_decrement.
+ *
+ * Note that cache size may not be decremented below min_size.
+ *
+ * Note also that if the upper_threshold is 1.0, the cache size
+ * will never be reduced.
+ *
+ * H5C2_decr__age_out_with_threshold: If the hit rate exceeds this
+ * threshold in any epoch, attempt to reduce the cache size
+ * by evicting entries that have not been accessed for more
+ * than the specified number of epochs.
+ *
+ * decrement: This field is only used when the decr_mode is
+ * H5C2_decr__threshold.
+ *
+ * The field is a double containing the multiplier used to derive the
+ * new cache size from the old if a cache size decrement is triggered.
+ * The decrement must be in the range 0.0 (in which case the cache will
+ * try to contract to its minimum size) to 1.0 (in which case the
+ * cache will never shrink).
+ *
+ * apply_max_decrement: Boolean flag used to determine whether decrements
+ * in cache size are to be limited by the max_decrement field.
+ *
+ * max_decrement: Maximum number of bytes by which the cache size can be
+ * decreased in a single re-size. Note that decrements may also be
+ * restricted by the min_size of the cache, and (in age out modes) by
+ * the empty_reserve field.
+ *
+ * epochs_before_eviction: Integer field used in H5C2_decr__age_out and
+ * H5C2_decr__age_out_with_threshold decrement modes.
+ *
+ * This field contains the number of epochs an entry must remain
+ * unaccessed before it is evicted in an attempt to reduce the
+ * cache size. If applicable, this field must lie in the range
+ * [1, H5C2__MAX_EPOCH_MARKERS].
+ *
+ * apply_empty_reserve: Boolean field controlling whether the empty_reserve
+ * field is to be used in computing the new cache size when the
+ * decr_mode is H5C2_decr__age_out or H5C2_decr__age_out_with_threshold.
+ *
+ * empty_reserve: To avoid a constant ratcheting down of cache size by small
+ * amounts in the H5C2_decr__age_out and H5C2_decr__age_out_with_threshold
+ * modes, this field allows one to require that any cache size
+ * reductions leave the specified fraction of unused space in the cache.
+ *
+ * The value of this field must be in the range [0.0, 1.0]. I would
+ * expect typical values to be in the range of 0.01 to 0.1.
+ *
+ ****************************************************************************/
+
+#define H5C2_RESIZE_CFG__VALIDATE_GENERAL 0x1
+#define H5C2_RESIZE_CFG__VALIDATE_INCREMENT 0x2
+#define H5C2_RESIZE_CFG__VALIDATE_DECREMENT 0x4
+#define H5C2_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
+#define H5C2_RESIZE_CFG__VALIDATE_ALL \
+( \
+ H5C2_RESIZE_CFG__VALIDATE_GENERAL | \
+ H5C2_RESIZE_CFG__VALIDATE_INCREMENT | \
+ H5C2_RESIZE_CFG__VALIDATE_DECREMENT | \
+ H5C2_RESIZE_CFG__VALIDATE_INTERACTIONS \
+)
+
+#define H5C2__CURR_AUTO_SIZE_CTL_VER 1
+#define H5C2__CURR_AUTO_RESIZE_RPT_FCN_VER 1
+
+#define H5C2__MAX_EPOCH_MARKERS 10
+
+#define H5C2__DEF_AR_UPPER_THRESHHOLD 0.9999
+#define H5C2__DEF_AR_LOWER_THRESHHOLD 0.9
+#define H5C2__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024))
+#define H5C2__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C2__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C2__DEF_AR_MIN_CLEAN_FRAC 0.5
+#define H5C2__DEF_AR_INCREMENT 2.0
+#define H5C2__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024))
+#define H5C2__DEF_AR_DECREMENT 0.9
+#define H5C2__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024))
+#define H5C2__DEF_AR_EPCHS_B4_EVICT 3
+#define H5C2__DEF_AR_EMPTY_RESERVE 0.05
+#define H5C2__MIN_AR_EPOCH_LENGTH 100
+#define H5C2__DEF_AR_EPOCH_LENGTH 50000
+#define H5C2__MAX_AR_EPOCH_LENGTH 1000000
+
+/* Possible outcomes of an automatic cache resize attempt, passed to
+ * the resize report function (H5C2_auto_resize_rpt_fcn below).
+ */
+enum H5C2_resize_status
+{
+ in_spec2,
+ increase2,
+ decrease2,
+ at_max_size2,
+ at_min_size2,
+ increase_disabled2,
+ decrease_disabled2,
+ not_full2
+}; /* enum H5C2_resize_status */
+
+/* Type of the optional function called to report activity each time
+ * the automatic cache resize code is executed; receives the observed
+ * hit rate, the resize status, and the old and new cache sizes.
+ */
+typedef void (*H5C2_auto_resize_rpt_fcn)(H5C2_t * cache_ptr,
+ int32_t version,
+ double hit_rate,
+ enum H5C2_resize_status status,
+ size_t old_max_cache_size,
+ size_t new_max_cache_size,
+ size_t old_min_clean_size,
+ size_t new_min_clean_size);
+
+/* Control block used to get and set the automatic cache resize
+ * configuration; see the structure header comment above for the full
+ * documentation of each field.
+ */
+typedef struct H5C2_auto_size_ctl_t
+{
+ /* general configuration fields: */
+ int32_t version; /* must be a known version number */
+ H5C2_auto_resize_rpt_fcn rpt_fcn; /* resize report fcn, or NULL */
+
+ hbool_t set_initial_size; /* whether initial_size is used */
+ size_t initial_size; /* starting cache size, if enabled */
+
+ double min_clean_fraction; /* fraction kept clean (parallel) */
+
+ size_t max_size; /* upper bound on cache size */
+ size_t min_size; /* lower bound on cache size */
+
+ int64_t epoch_length; /* accesses per resize epoch */
+
+
+ /* size increase control fields: */
+ enum H5C2_cache_incr_mode incr_mode; /* how increases are triggered */
+
+ double lower_hr_threshold; /* hit rate triggering an increase */
+
+ double increment; /* multiplier applied on increase */
+
+ hbool_t apply_max_increment; /* whether max_increment is used */
+ size_t max_increment; /* max bytes added per resize */
+
+
+ /* size decrease control fields: */
+ enum H5C2_cache_decr_mode decr_mode; /* how decreases are triggered */
+
+ double upper_hr_threshold; /* hit rate triggering a decrease */
+
+ double decrement; /* multiplier applied on decrease */
+
+ hbool_t apply_max_decrement; /* whether max_decrement is used */
+ size_t max_decrement; /* max bytes removed per resize */
+
+ int32_t epochs_before_eviction; /* idle epochs before age out */
+
+ hbool_t apply_empty_reserve; /* whether empty_reserve is used */
+ double empty_reserve; /* fraction left unused on decrease */
+
+} H5C2_auto_size_ctl_t;
+
+
+/*
+ * Library prototypes.
+ */
+
+/* #defines of flags used in the flags parameters in some of the
+ * following function calls. Note that not all flags are applicable
+ * to all function calls. Flags that don't apply to a particular
+ * function are ignored in that function.
+ *
+ * These flags apply to all function calls:
+ *
+ * H5C2__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
+ *
+ *
+ * These flags apply to H5C2_insert_entry():
+ *
+ * H5C2__SET_FLUSH_MARKER_FLAG
+ * H5C2__PIN_ENTRY_FLAG
+ *
+ * These flags apply to H5C2_protect()
+ *
+ * H5C2__READ_ONLY_FLAG
+ * H5C2__CHECK_SIZE_FLAG
+ *
+ * These flags apply to H5C2_unprotect():
+ *
+ * H5C2__SET_FLUSH_MARKER_FLAG
+ * H5C2__DELETED_FLAG
+ * H5C2__DIRTIED_FLAG
+ * H5C2__SIZE_CHANGED_FLAG
+ * H5C2__PIN_ENTRY_FLAG
+ * H5C2__UNPIN_ENTRY_FLAG
+ *
+ *
+ * These flags apply to H5C2_flush_cache():
+ *
+ * H5C2__FLUSH_INVALIDATE_FLAG
+ * H5C2__FLUSH_CLEAR_ONLY_FLAG
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C2__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
+ * with H5C2__FLUSH_INVALIDATE_FLAG)
+ *
+ * These flags apply to H5C2_flush_single_entry():
+ *
+ * H5C2__FLUSH_INVALIDATE_FLAG
+ * H5C2__FLUSH_CLEAR_ONLY_FLAG
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG
+ */
+
+#define H5C2__NO_FLAGS_SET 0x0000
+#define H5C2__SET_FLUSH_MARKER_FLAG 0x0001
+#define H5C2__DELETED_FLAG 0x0002
+#define H5C2__DIRTIED_FLAG 0x0004
+#define H5C2__SIZE_CHANGED_FLAG 0x0008
+#define H5C2__PIN_ENTRY_FLAG 0x0010
+#define H5C2__UNPIN_ENTRY_FLAG 0x0020
+#define H5C2__FLUSH_INVALIDATE_FLAG 0x0040
+#define H5C2__FLUSH_CLEAR_ONLY_FLAG 0x0080
+#define H5C2__FLUSH_MARKED_ENTRIES_FLAG 0x0100
+#define H5C2__FLUSH_IGNORE_PROTECTED_FLAG 0x0200
+#define H5C2__READ_ONLY_FLAG 0x0400
+#define H5C2__CHECK_SIZE_FLAG 0x0800
+
+H5_DLL H5C2_t * H5C2_create(const H5F_t * f,
+ size_t max_cache_size,
+ size_t min_clean_size,
+ int max_type_id,
+ const char * (* type_name_table_ptr),
+ H5C2_write_permitted_func_t check_write_permitted,
+ hbool_t write_permitted,
+ H5C2_log_flush_func_t log_flush,
+ void * aux_ptr);
+
+H5_DLL void H5C2_def_auto_resize_rpt_fcn(H5C2_t * cache_ptr,
+ int32_t version,
+ double hit_rate,
+ enum H5C2_resize_status status,
+ size_t old_max_cache_size,
+ size_t new_max_cache_size,
+ size_t old_min_clean_size,
+ size_t new_min_clean_size);
+
+H5_DLL herr_t H5C2_dest(H5C2_t * cache_ptr,
+ hid_t dxpl_id);
+
+H5_DLL herr_t H5C2_dest_empty(H5C2_t * cache_ptr);
+
+H5_DLL herr_t H5C2_expunge_entry(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr);
+
+H5_DLL herr_t H5C2_flush_cache(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ unsigned flags);
+
+
+H5_DLL herr_t H5C2_flush_to_min_clean(H5C2_t * cache_ptr,
+ hid_t dxpl_id);
+
+H5_DLL herr_t H5C2_get_cache_auto_resize_config(H5C2_t * cache_ptr,
+ H5C2_auto_size_ctl_t *config_ptr);
+
+H5_DLL herr_t H5C2_get_cache_size(H5C2_t * cache_ptr,
+ size_t * max_size_ptr,
+ size_t * min_clean_size_ptr,
+ size_t * cur_size_ptr,
+ int32_t * cur_num_entries_ptr);
+
+H5_DLL herr_t H5C2_get_cache_hit_rate(H5C2_t * cache_ptr,
+ double * hit_rate_ptr);
+
+H5_DLL herr_t H5C2_get_entry_status(H5C2_t * cache_ptr,
+ haddr_t addr,
+ size_t * size_ptr,
+ hbool_t * in_cache_ptr,
+ hbool_t * is_dirty_ptr,
+ hbool_t * is_protected_ptr,
+ hbool_t * is_pinned_ptr);
+
+H5_DLL herr_t H5C2_get_evictions_enabled(H5C2_t * cache_ptr,
+ hbool_t * evictions_enabled_ptr);
+
+H5_DLL herr_t H5C2_get_trace_file_ptr(H5C2_t * cache_ptr,
+ FILE ** trace_file_ptr_ptr);
+
+H5_DLL herr_t H5C2_insert_entry(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ size_t len,
+ void * thing,
+ unsigned int flags);
+
+H5_DLL herr_t H5C2_mark_entries_as_clean(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ int32_t ce_array_len,
+ haddr_t * ce_array_ptr);
+
+H5_DLL herr_t H5C2_mark_pinned_entry_dirty(H5C2_t * cache_ptr,
+ void * thing,
+ hbool_t size_changed,
+ size_t new_size);
+
+H5_DLL herr_t H5C2_mark_pinned_or_protected_entry_dirty(H5C2_t * cache_ptr,
+ void * thing);
+
+H5_DLL herr_t H5C2_rename_entry(H5C2_t * cache_ptr,
+ const H5C2_class_t * type,
+ haddr_t old_addr,
+ haddr_t new_addr);
+
+H5_DLL herr_t H5C2_pin_protected_entry(H5C2_t * cache_ptr,
+ void * thing);
+
+H5_DLL void * H5C2_protect(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ size_t len,
+ const void * udata,
+ unsigned flags);
+
+H5_DLL herr_t H5C2_reset_cache_hit_rate_stats(H5C2_t * cache_ptr);
+
+H5_DLL herr_t H5C2_resize_pinned_entry(H5C2_t * cache_ptr,
+ void * thing,
+ size_t new_size);
+
+H5_DLL herr_t H5C2_set_cache_auto_resize_config(H5C2_t * cache_ptr,
+ H5C2_auto_size_ctl_t *config_ptr);
+
+H5_DLL herr_t H5C2_set_evictions_enabled(H5C2_t * cache_ptr,
+ hbool_t evictions_enabled);
+
+H5_DLL herr_t H5C2_set_prefix(H5C2_t * cache_ptr, char * prefix);
+
+H5_DLL herr_t H5C2_set_skip_flags(H5C2_t * cache_ptr,
+ hbool_t skip_file_checks,
+ hbool_t skip_dxpl_id_checks);
+
+H5_DLL herr_t H5C2_set_trace_file_ptr(H5C2_t * cache_ptr,
+ FILE * trace_file_ptr);
+
+H5_DLL herr_t H5C2_stats(H5C2_t * cache_ptr,
+ const char * cache_name,
+ hbool_t display_detailed_stats);
+
+H5_DLL void H5C2_stats__reset(H5C2_t * cache_ptr);
+
+H5_DLL herr_t H5C2_unpin_entry(H5C2_t * cache_ptr, void * thing);
+
+H5_DLL herr_t H5C2_unprotect(H5C2_t * cache_ptr,
+ hid_t dxpl_id,
+ const H5C2_class_t * type,
+ haddr_t addr,
+ void * thing,
+ unsigned int flags,
+ size_t new_size);
+
+H5_DLL herr_t H5C2_validate_resize_config(H5C2_auto_size_ctl_t * config_ptr,
+ unsigned int tests);
+
+#endif /* !_H5C2private_H */
+
diff --git a/src/H5C2public.h b/src/H5C2public.h
new file mode 100644
index 0000000..e1adff3
--- /dev/null
+++ b/src/H5C2public.h
@@ -0,0 +1,55 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5C2public.h
+ * June 4, 2005
+ * John Mainzer
+ *
+ * Purpose: Public include file for cache functions.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef _H5C2public_H
+#define _H5C2public_H
+
+/* Public headers needed by this file */
+#include "H5public.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum H5C2_cache_incr_mode
+{
+    H5C2_incr__off,        /* automatic cache size increases disabled */
+    H5C2_incr__threshold   /* grow when hit rate falls below lower_hr_threshold
+};                          * (see H5C2_auto_size_ctl_t) */
+
+enum H5C2_cache_decr_mode
+{
+    H5C2_decr__off,                    /* automatic cache size decreases disabled */
+    H5C2_decr__threshold,              /* shrink when hit rate exceeds upper_hr_threshold */
+    H5C2_decr__age_out,                /* evict entries unreferenced for epochs_before_eviction epochs */
+    H5C2_decr__age_out_with_threshold  /* age-out gated by hit-rate threshold -- TODO confirm gating */
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/test/cache2.c b/test/cache2.c
new file mode 100644
index 0000000..12dfc01
--- /dev/null
+++ b/test/cache2.c
@@ -0,0 +1,26769 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 6/9/04
+ *
+ * This file contains tests for the cache implemented in
+ * H5C.c
+ */
+#include "h5test.h"
+#include "H5Iprivate.h"
+#include "H5ACprivate.h"
+#include "cache2_common.h"
+
+/* private function declarations: */
+
+static void smoke_check_1(void);
+static void smoke_check_2(void);
+static void smoke_check_3(void);
+static void smoke_check_4(void);
+static void smoke_check_5(void);
+static void smoke_check_6(void);
+static void smoke_check_7(void);
+static void smoke_check_8(void);
+static void smoke_check_9(void);
+static void smoke_check_10(void);
+static void write_permitted_check(void);
+static void check_insert_entry(void);
+static void check_flush_cache(void);
+static void check_flush_cache__empty_cache(H5C2_t * cache_ptr);
+static void check_flush_cache__multi_entry(H5C2_t * cache_ptr);
+static void check_flush_cache__multi_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct flush_cache_test_spec spec[]);
+static void check_flush_cache__pe_multi_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct pe_flush_cache_test_spec spec[]);
+static void check_flush_cache__single_entry(H5C2_t * cache_ptr);
+static void check_flush_cache__single_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ int entry_type,
+ int entry_idx,
+ hbool_t insert_flag,
+ hbool_t dirty_flag,
+ unsigned int flags,
+ unsigned int flush_flags,
+ hbool_t expected_deserialized,
+ hbool_t expected_cleared,
+ hbool_t expected_serialized,
+ hbool_t expected_destroyed);
+static void check_flush_cache__pinned_single_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ int entry_type,
+ int entry_idx,
+ hbool_t dirty_flag,
+ hbool_t mark_dirty,
+ hbool_t pop_mark_dirty_prot,
+ hbool_t pop_mark_dirty_pinned,
+ hbool_t unprotect_unpin,
+ unsigned int flags,
+ unsigned int flush_flags,
+ hbool_t expected_cleared,
+ hbool_t expected_serialized,
+ hbool_t expected_destroyed);
+static void check_flush_cache__flush_ops(H5C2_t * cache_ptr);
+static void check_flush_cache__flush_op_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct fo_flush_cache_test_spec spec[],
+ int init_expected_index_len,
+ size_t init_expected_index_size,
+ int expected_index_len,
+ size_t expected_index_size,
+ int check_size,
+ struct fo_flush_entry_check check[]);
+static void check_flush_cache__flush_op_eviction_test(H5C2_t * cache_ptr);
+static void check_flush_protected_err(void);
+static void check_get_entry_status(void);
+static void check_expunge_entry(void);
+static void check_multiple_read_protect(void);
+static void check_rename_entry(void);
+static void check_rename_entry__run_test(H5C2_t * cache_ptr, int test_num,
+ struct rename_entry_test_spec * spec_ptr);
+static void check_pin_protected_entry(void);
+static void check_resize_entry(void);
+static void check_evictions_enabled(void);
+static void check_destroy_pinned_err(void);
+static void check_destroy_protected_err(void);
+static void check_duplicate_insert_err(void);
+static void check_rename_err(void);
+static void check_double_pin_err(void);
+static void check_double_unpin_err(void);
+static void check_pin_entry_errs(void);
+static void check_double_protect_err(void);
+static void check_double_unprotect_err(void);
+static void check_mark_entry_dirty_errs(void);
+static void check_expunge_entry_errs(void);
+static void check_resize_entry_errs(void);
+static void check_unprotect_ro_dirty_err(void);
+static void check_protect_ro_rw_err(void);
+static void check_check_evictions_enabled_err(void);
+static void check_auto_cache_resize(void);
+static void check_auto_cache_resize_disable(void);
+static void check_auto_cache_resize_epoch_markers(void);
+static void check_auto_cache_resize_input_errs(void);
+static void check_auto_cache_resize_aux_fcns(void);
+
+
+/**************************************************************************/
+/**************************************************************************/
+/********************************* tests: *********************************/
+/**************************************************************************/
+/**************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function:    smoke_check_1()
+ *
+ * Purpose:     A basic functional test, inserts, destroys, and renames in
+ *              the mix, along with repeated protects and unprotects.
+ *              All entries are marked as clean.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/16/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/18/05
+ *              Added code to skip this test if the skip_long_tests2 global
+ *              is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_1(void)
+{
+    const char * fcn_name = "smoke_check_1";
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = FALSE;
+    hbool_t dirty_unprotects = FALSE;
+    hbool_t dirty_destroys = FALSE;
+    hbool_t display_stats = FALSE;
+    int32_t lag = 10;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("smoke check #1 -- all clean, ins, dest, ren, 4/2 MB cache");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "    Long tests disabled.\n");
+
+        return;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    cache_ptr = setup_cache2((size_t)(4 * 1024 * 1024),
+                             (size_t)(2 * 1024 * 1024));
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ FALSE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ TRUE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ FALSE,
+                             /* do_inserts             */ FALSE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* do_renames             */ TRUE,
+                             /* rename_to_main_addr    */ TRUE,
+                             /* do_destroys            */ FALSE,
+                             /* do_mult_ro_protects    */ TRUE,
+                             /* dirty_destroys         */ dirty_destroys,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ FALSE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ FALSE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ TRUE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    smoke_check_2()
+ *
+ * Purpose:     A basic functional test, with inserts, destroys, and
+ *              renames in the mix, along with some repeated protects
+ *              and unprotects.  About half the entries are marked as
+ *              dirty.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/24/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/18/05
+ *              Added code to skip this test if the skip_long_tests2 global
+ *              is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_2(void)
+{
+    const char * fcn_name = "smoke_check_2";
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = TRUE;
+    hbool_t dirty_unprotects = TRUE;
+    hbool_t dirty_destroys = TRUE;
+    hbool_t display_stats = FALSE;
+    int32_t lag = 10;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("smoke check #2 -- ~1/2 dirty, ins, dest, ren, 4/2 MB cache");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "    Long tests disabled.\n");
+
+        return;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    cache_ptr = setup_cache2((size_t)(4 * 1024 * 1024),
+                             (size_t)(2 * 1024 * 1024));
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ TRUE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ FALSE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* do_renames             */ TRUE,
+                             /* rename_to_main_addr    */ TRUE,
+                             /* do_destroys            */ FALSE,
+                             /* do_mult_ro_protects    */ TRUE,
+                             /* dirty_destroys         */ dirty_destroys,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ FALSE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ TRUE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    smoke_check_3()
+ *
+ * Purpose:     A basic functional test on a tiny cache, with inserts,
+ *              destroys, and renames in the mix, along with repeated
+ *              protects and unprotects.  All entries are marked as clean.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/16/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/18/05
+ *              Added code to skip this test if the skip_long_tests2 global
+ *              is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_3(void)
+{
+    const char * fcn_name = "smoke_check_3";
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = FALSE;
+    hbool_t dirty_unprotects = FALSE;
+    hbool_t dirty_destroys = FALSE;
+    hbool_t display_stats = FALSE;
+    int32_t lag = 10;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("smoke check #3 -- all clean, ins, dest, ren, 2/1 KB cache");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "    Long tests disabled.\n");
+
+        return;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                             (size_t)(1 * 1024));
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ TRUE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ FALSE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* do_renames             */ TRUE,
+                             /* rename_to_main_addr    */ TRUE,
+                             /* do_destroys            */ FALSE,
+                             /* do_mult_ro_protects    */ TRUE,
+                             /* dirty_destroys         */ dirty_destroys,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ FALSE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ TRUE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    smoke_check_4()
+ *
+ * Purpose:     A basic functional test on a tiny cache, with inserts,
+ *              destroys, and renames in the mix, along with repeated
+ *              protects and unprotects.  About half the entries are
+ *              marked as dirty.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/24/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/18/05
+ *              Added code to skip this test if the skip_long_tests2 global
+ *              is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_4(void)
+{
+    const char * fcn_name = "smoke_check_4";
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = TRUE;
+    hbool_t dirty_unprotects = TRUE;
+    hbool_t dirty_destroys = TRUE;
+    hbool_t display_stats = FALSE;
+    int32_t lag = 10;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("smoke check #4 -- ~1/2 dirty, ins, dest, ren, 2/1 KB cache");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "    Long tests disabled.\n");
+
+        return;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                             (size_t)(1 * 1024));
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ TRUE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ FALSE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* do_renames             */ TRUE,
+                             /* rename_to_main_addr    */ TRUE,
+                             /* do_destroys            */ FALSE,
+                             /* do_mult_ro_protects    */ TRUE,
+                             /* dirty_destroys         */ dirty_destroys,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ FALSE,
+                            /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ TRUE,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ TRUE,
+                             /* do_inserts             */ TRUE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_4() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_5()
+ *
+ * Purpose: A basic functional test on a cache with automatic cache
+ * resizing enabled, with inserts in the mix, along with
+ * repeated protects and unprotects. All entries are marked
+ * as clean.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/14/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/18/05
+ * Added code to skip this test if the skip_long_tests2 global
+ * is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_5(void)
+{
+    const char * fcn_name = "smoke_check_5";
+    herr_t result;
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = FALSE;
+    /* NOTE(review): declared int but used as a boolean flag -- presumably
+     * should be hbool_t like dirty_inserts; confirm against the
+     * hl_col_major_scan_*2() prototypes.
+     */
+    int dirty_unprotects = FALSE;
+    hbool_t display_stats = FALSE;
+    int32_t max_index = 1024;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+    /* Auto-resize configuration under test: threshold-based increment and
+     * decrement.  Field-by-field comments below name each struct member.
+     */
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+#if 1
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ H5C2_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (2 * 1024 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.1,
+
+        /* NOTE(review): "1025" below looks like a typo for 1024 (would make
+         * max_size a clean 32 MB) -- confirm before changing, as the odd
+         * value may be intentional for the test.
+         */
+        /* size_t      max_size               = */ (32 * 1024 * 1025),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 50000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.9,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.5
+    };
+
+    TESTING("smoke check #5 -- all clean, ins, prot, unprot, AR cache 1");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "	Long tests disabled.\n");
+
+        return;
+    }
+
+    /* run_full_test2 scales the test up by enlarging the index range */
+    if ( run_full_test2 ) {
+
+        max_index = (10 * 1024) - 1;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* deliberately tiny initial cache (2 KB / 1 KB) so that the auto-resize
+     * machinery configured below is forced to do real work
+     */
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                            (size_t)(1 * 1024));
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ FALSE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ FALSE,
+                                /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts,
+                               /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ TRUE,
+                                /* dirty_inserts          */ dirty_inserts,
+                                /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* sanity checks on the global entry arrays after teardown */
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+	HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_5() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_6()
+ *
+ * Purpose: A basic functional test on a cache with automatic cache
+ * resizing enabled, with inserts in the mix, along with
+ * repeated protects and unprotects. About one half of all
+ * entries are marked as dirty.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/25/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/18/05
+ * Added code to skip this test if the skip_long_tests2 global
+ * is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_6(void)
+{
+    const char * fcn_name = "smoke_check_6";
+    herr_t result;
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = TRUE;
+    /* NOTE(review): declared int but used as a boolean flag -- presumably
+     * should be hbool_t like dirty_inserts; confirm against the
+     * hl_col_major_scan_*2() prototypes.
+     */
+    int dirty_unprotects = FALSE;
+    hbool_t display_stats = FALSE;
+    int mile_stone = 1;
+    int32_t max_index = 1024;
+    H5C2_t * cache_ptr = NULL;
+    /* Auto-resize configuration under test: threshold-based increment and
+     * decrement.  Differs from smoke_check_5 only in empty_reserve (0.05
+     * vs 0.5).
+     */
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+#if 1
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ H5C2_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (2 * 1024 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.1,
+
+        /* NOTE(review): "1025" below looks like a typo for 1024 -- confirm */
+        /* size_t      max_size               = */ (32 * 1024 * 1025),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 50000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.9,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.05
+    };
+
+    TESTING("smoke check #6 -- ~1/2 dirty, ins, prot, unprot, AR cache 1");
+
+    /* NOTE(review): pass2 is set before the skip check here; sibling smoke
+     * checks set it after.  Harmless since the skipped path returns without
+     * reading pass2, but inconsistent -- confirm which ordering is intended.
+     */
+    pass2 = TRUE;
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "	Long tests disabled.\n");
+
+        return;
+    }
+
+    /* run_full_test2 scales the test up by enlarging the index range */
+    if ( run_full_test2 ) {
+
+        max_index = (10 * 1024) - 1;
+    }
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* deliberately tiny initial cache so auto-resizing is exercised */
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                            (size_t)(1 * 1024));
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ FALSE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ FALSE,
+                                /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts,
+                               /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ TRUE,
+                                /* dirty_inserts          */ dirty_inserts,
+                                /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* sanity checks on the global entry arrays after teardown */
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+	HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_6() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_7()
+ *
+ * Purpose: A basic functional test on a cache with automatic cache
+ * resizing enabled, with inserts in the mix, along with
+ * repeated protects and unprotects. All entries are marked
+ * as clean.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 12/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/18/05
+ * Added code to skip this test if the skip_long_tests2 global
+ * is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_7(void)
+{
+    const char * fcn_name = "smoke_check_7";
+    herr_t result;
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = FALSE;
+    /* NOTE(review): declared int but used as a boolean flag -- presumably
+     * should be hbool_t like dirty_inserts; confirm against the
+     * hl_col_major_scan_*2() prototypes.
+     */
+    int dirty_unprotects = FALSE;
+    hbool_t display_stats = FALSE;
+    int mile_stone = 1;
+    int32_t max_index = 1024;
+    H5C2_t * cache_ptr = NULL;
+    /* Auto-resize configuration under test ("AR cache 2"): threshold-based
+     * increment, but age-out-with-threshold decrement -- unlike smoke
+     * checks 5/6, which use threshold decrement.
+     */
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+#if 1
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ H5C2_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (2 * 1024 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.1,
+
+        /* NOTE(review): "1025" below looks like a typo for 1024 -- confirm */
+        /* size_t      max_size               = */ (32 * 1024 * 1025),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 100000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (8 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */
+                                             H5C2_decr__age_out_with_threshold,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.9,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.1
+    };
+
+    TESTING("smoke check #7 -- all clean, ins, prot, unprot, AR cache 2");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "	Long tests disabled.\n");
+
+        return;
+    }
+
+    /* run_full_test2 scales the test up by enlarging the index range */
+    if ( run_full_test2 ) {
+
+        max_index = (10 * 1024) - 1;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* deliberately tiny initial cache so auto-resizing is exercised */
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                            (size_t)(1 * 1024));
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ FALSE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ FALSE,
+                                /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts,
+                               /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ TRUE,
+                                /* dirty_inserts          */ dirty_inserts,
+                                /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* sanity checks on the global entry arrays after teardown */
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+	HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_7() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_8()
+ *
+ * Purpose: A basic functional test on a cache with automatic cache
+ * resizing enabled, with inserts in the mix, along with
+ * repeated protects and unprotects. About one half of all
+ * entries are marked as dirty.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/25/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/18/05
+ * Added code to skip this test if the skip_long_tests2 global
+ * is true.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_8(void)
+{
+    const char * fcn_name = "smoke_check_8";
+    herr_t result;
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = TRUE;
+    /* NOTE(review): declared int but used as a boolean flag -- presumably
+     * should be hbool_t like dirty_inserts; confirm against the
+     * hl_col_major_scan_*2() prototypes.
+     */
+    int dirty_unprotects = FALSE;
+    hbool_t display_stats = FALSE;
+    int mile_stone = 1;
+    int32_t max_index = 1024;
+    H5C2_t * cache_ptr = NULL;
+    /* Auto-resize configuration under test ("AR cache 2"): same
+     * age-out-with-threshold decrement as smoke_check_7, but with a smaller
+     * max_increment (4 MB vs 8 MB).
+     */
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+#if 1
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ H5C2_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (2 * 1024 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.1,
+
+        /* NOTE(review): "1025" below looks like a typo for 1024 -- confirm */
+        /* size_t      max_size               = */ (32 * 1024 * 1025),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 100000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */
+                                             H5C2_decr__age_out_with_threshold,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.9,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.1
+    };
+
+    TESTING("smoke check #8 -- ~1/2 dirty, ins, prot, unprot, AR cache 2");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "	Long tests disabled.\n");
+
+        return;
+    }
+
+    /* run_full_test2 scales the test up by enlarging the index range */
+    if ( run_full_test2 ) {
+
+        max_index = (10 * 1024) - 1;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* deliberately tiny initial cache so auto-resizing is exercised */
+    cache_ptr = setup_cache2((size_t)(2 * 1024),
+                            (size_t)(1 * 1024));
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ FALSE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ FALSE,
+                                /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts);
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                               /* max_index              */ max_index,
+                               /* verbose                */ FALSE,
+                               /* reset_stats            */ TRUE,
+                               /* display_stats          */ display_stats,
+                               /* display_detailed_stats */ FALSE,
+                               /* do_inserts             */ TRUE,
+                               /* dirty_inserts          */ dirty_inserts,
+                               /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    hl_col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                                /* max_index              */ max_index,
+                                /* verbose                */ FALSE,
+                                /* reset_stats            */ TRUE,
+                                /* display_stats          */ display_stats,
+                                /* display_detailed_stats */ FALSE,
+                                /* do_inserts             */ TRUE,
+                                /* dirty_inserts          */ dirty_inserts,
+                                /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* sanity checks on the global entry arrays after teardown */
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+	HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_8() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_9()
+ *
+ * Purpose: A repeat of smoke check 1, only with the cache corked
+ * part of the time.
+ *
+ * Recall that smoke check 1 is a basic functional test,
+ * with inserts, destroys, and renames in the mix, along
+ * with repeated protects and unprotects. All entries are
+ * marked as clean.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/1/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_9(void)
+{
+    const char * fcn_name = "smoke_check_9";
+    herr_t result;
+    hbool_t show_progress = FALSE;
+    hbool_t dirty_inserts = FALSE;
+    /* NOTE(review): dirty_unprotects / dirty_destroys are declared int but
+     * used as boolean flags -- presumably should be hbool_t like
+     * dirty_inserts; confirm against the row/col_major_scan_*2() prototypes.
+     */
+    int dirty_unprotects = FALSE;
+    int dirty_destroys = FALSE;
+    hbool_t display_stats = FALSE;
+    hbool_t display_detailed_stats = FALSE;
+    int32_t lag = 10;
+    int mile_stone = 1;
+    H5C2_t * cache_ptr = NULL;
+
+    /* NOTE(review): the test title says "corked", but the body toggles
+     * evictions via H5C2_set_evictions_enabled() -- presumably "corked"
+     * here means evictions disabled; confirm against the H5C2 docs.
+     */
+    TESTING("smoke check #9 -- all clean, ins, dest, ren, 4/2 MB, corked");
+
+    if ( skip_long_tests2 ) {
+
+        SKIPPED();
+
+        HDfprintf(stdout, "	Long tests disabled.\n");
+
+        return;
+    }
+
+    pass2 = TRUE;
+
+    if ( show_progress ) /* 1 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    reset_entries2();
+
+    if ( show_progress ) /* 2 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    cache_ptr = setup_cache2((size_t)(4 * 1024 * 1024),
+                             (size_t)(2 * 1024 * 1024));
+
+    /* disable evictions */
+
+    if ( show_progress ) /* 3 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't disable evictions 1.\n";
+	}
+    }
+
+    if ( show_progress ) /* 4 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* forward scan with evictions disabled -- exercises the cache growing
+     * past its nominal limits
+     */
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ display_detailed_stats,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ TRUE,
+			    /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    /* enable evictions */
+
+    if ( show_progress ) /* 5 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't enable evictions 1.\n";
+	}
+    }
+
+    if ( show_progress ) /* 6 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled \n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* backward scan with evictions re-enabled -- the cache should shrink
+     * back toward its configured size
+     */
+    row_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ display_detailed_stats,
+                             /* do_inserts             */ FALSE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* do_renames             */ TRUE,
+                             /* rename_to_main_addr    */ TRUE,
+                             /* do_destroys            */ FALSE,
+			     /* do_mult_ro_protects    */ TRUE,
+                             /* dirty_destroys         */ dirty_destroys,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 7 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't disable evictions 2.\n";
+	}
+    }
+
+    if ( show_progress ) /* 8 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled \n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    row_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ display_detailed_stats,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* do_renames             */ TRUE,
+                            /* rename_to_main_addr    */ FALSE,
+                            /* do_destroys            */ FALSE,
+			    /* do_mult_ro_protects    */ TRUE,
+                            /* dirty_destroys         */ dirty_destroys,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 9 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't enable evictions 2.\n";
+	}
+    }
+
+    if ( show_progress ) /* 10 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled \n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush and destroy all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ TRUE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 11 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't disable evictions 3.\n";
+	}
+    }
+
+    if ( show_progress ) /* 12 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_forward2(/* cache_ptr              */ cache_ptr,
+                            /* lag                    */ lag,
+                            /* verbose                */ FALSE,
+                            /* reset_stats            */ TRUE,
+                            /* display_stats          */ display_stats,
+                            /* display_detailed_stats */ display_detailed_stats,
+                            /* do_inserts             */ TRUE,
+                            /* dirty_inserts          */ dirty_inserts,
+                            /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 13 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* flush all entries in the cache: */
+
+    flush_cache2(/* cache_ptr           */ cache_ptr,
+                 /* destroy_entries     */ FALSE,
+                 /* dump_stats          */ FALSE,
+                 /* dump_detailed_stats */ FALSE);
+
+    if ( show_progress ) /* 14 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't enable evictions 3.\n";
+	}
+    }
+
+    if ( show_progress ) /* 15 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    col_major_scan_backward2(/* cache_ptr              */ cache_ptr,
+                             /* lag                    */ lag,
+                             /* verbose                */ FALSE,
+                             /* reset_stats            */ TRUE,
+                             /* display_stats          */ display_stats,
+                             /* display_detailed_stats */ display_detailed_stats,
+                             /* do_inserts             */ TRUE,
+                             /* dirty_inserts          */ dirty_inserts,
+                             /* dirty_unprotects       */ dirty_unprotects);
+
+    if ( show_progress ) /* 16 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result < 0 ) {
+
+             pass2 = FALSE;
+	     failure_mssg2 = "can't disable evictions 4.\n";
+	}
+    }
+
+
+    if ( show_progress ) /* 17 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    takedown_cache2(cache_ptr, display_stats, TRUE);
+
+    if ( show_progress ) /* 18 */
+        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+                  fcn_name, mile_stone++, (int)pass2);
+
+    /* sanity checks on the global entry arrays after teardown */
+    verify_clean2();
+    verify_unprotected2();
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+	HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* smoke_check_9() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_10()
+ *
+ * Purpose: A repeat of smoke check 2, only with the cache corked
+ * part of the time.
+ *
+ * Recall that smoke check 2 is a basic functional test,
+ * with inserts, destroys, and renames in the mix, along
+ * with some repeated protects and unprotects. About half
+ * the entries are marked as dirty.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/1/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_10(void)
+{
+ const char * fcn_name = "smoke_check_10";
+ herr_t result;
+ hbool_t show_progress = FALSE;
+ hbool_t dirty_inserts = TRUE;
+ int dirty_unprotects = TRUE;
+ int dirty_destroys = TRUE;
+ hbool_t display_stats = FALSE;
+ hbool_t display_detailed_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C2_t * cache_ptr = NULL;
+
+ TESTING("smoke check #10 -- ~1/2 dirty, ins, dest, ren, 4/2 MB, corked");
+
+ if ( skip_long_tests2 ) {
+
+ SKIPPED();
+
+ HDfprintf(stdout, " Long tests disabled.\n");
+
+ return;
+ }
+
+ pass2 = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ reset_entries2();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ cache_ptr = setup_cache2((size_t)(4 * 1024 * 1024),
+ (size_t)(2 * 1024 * 1024));
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ row_major_scan_forward2(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ display_detailed_stats,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't disable evictions 1.\n";
+ }
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ row_major_scan_backward2(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ display_detailed_stats,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't enable evictions 1.\n";
+ }
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ row_major_scan_forward2(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ display_detailed_stats,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't disable evictions 2.\n";
+ }
+ }
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache2(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't enable evictions 2.\n";
+ }
+ }
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ col_major_scan_forward2(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ display_detailed_stats,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 12 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't disable evictions 3.\n";
+ }
+ }
+
+ if ( show_progress ) /* 13 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* flush all entries in the cache: */
+
+ flush_cache2(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ FALSE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 14 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't enable evictions 3.\n";
+ }
+ }
+
+ if ( show_progress ) /* 15 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions enabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ col_major_scan_backward2(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ display_detailed_stats,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 16 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "can't disable evictions 4.\n";
+ }
+ }
+
+ if ( show_progress ) /* 17 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d -- evictions disabled\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ takedown_cache2(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 18 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ verify_clean2();
+ verify_unprotected2();
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* smoke_check_10() */
+
+
/*-------------------------------------------------------------------------
 * Function:	write_permitted_check()
 *
 * Purpose:	A basic test of the write permitted function.  In essence,
 *		we load the cache up with dirty entries, set
 *		write_permitted2 to FALSE, and then protect a bunch of
 *		entries.  If there are any writes while write_permitted2 is
 *		FALSE, the test will fail.
 *
 * Return:	void
 *
 * Programmer:	John Mainzer
 *		6/24/04
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */

static void
write_permitted_check(void)
{

/* The whole test only makes sense when the clean / dirty LRU lists are
 * maintained -- otherwise it is skipped below.
 */
#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

    const char * fcn_name = "write_permitted_check";
    hbool_t show_progress = FALSE;
    hbool_t display_stats = FALSE;
    int32_t lag = 10;           /* how far the scans look ahead / behind */
    int mile_stone = 1;         /* progress counter for show_progress output */
    H5C2_t * cache_ptr = NULL;

#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

    TESTING("write permitted check -- 1/0 MB cache");

#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

    pass2 = TRUE;

    if ( show_progress ) /* 1 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    reset_entries2();

    if ( show_progress ) /* 2 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* small cache (1 MB max size, 0 min clean size) so that the scans
     * below force plenty of eviction / flush activity.
     */
    cache_ptr = setup_cache2((size_t)(1 * 1024 * 1024),
                             (size_t)(0));

    if ( show_progress ) /* 3 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* load the cache up with dirty entries while writes are permitted.
     *
     * NOTE(review): display_detailed_stats is passed as the literal TRUE
     * here (other smoke checks pass a variable) -- presumably harmless as
     * long as detailed stats output is gated on display_stats; confirm.
     */
    row_major_scan_forward2(/* cache_ptr */ cache_ptr,
                            /* lag */ lag,
                            /* verbose */ FALSE,
                            /* reset_stats */ TRUE,
                            /* display_stats */ display_stats,
                            /* display_detailed_stats */ TRUE,
                            /* do_inserts */ TRUE,
                            /* dirty_inserts */ TRUE,
                            /* do_renames */ TRUE,
                            /* rename_to_main_addr */ FALSE,
                            /* do_destroys */ TRUE,
                            /* do_mult_ro_protects */ TRUE,
                            /* dirty_destroys */ TRUE,
                            /* dirty_unprotects */ TRUE);

    if ( show_progress ) /* 4 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* forbid writes -- any write attempted during the next scan is a
     * test failure (checked inside the test machinery).
     */
    write_permitted2 = FALSE;

    row_major_scan_backward2(/* cache_ptr */ cache_ptr,
                             /* lag */ lag,
                             /* verbose */ FALSE,
                             /* reset_stats */ TRUE,
                             /* display_stats */ display_stats,
                             /* display_detailed_stats */ TRUE,
                             /* do_inserts */ FALSE,
                             /* dirty_inserts */ FALSE,
                             /* do_renames */ TRUE,
                             /* rename_to_main_addr */ TRUE,
                             /* do_destroys */ FALSE,
                             /* do_mult_ro_protects */ TRUE,
                             /* dirty_destroys */ FALSE,
                             /* dirty_unprotects */ NO_CHANGE);

    if ( show_progress ) /* 5 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* re-enable writes and dirty things up again */
    write_permitted2 = TRUE;

    row_major_scan_forward2(/* cache_ptr */ cache_ptr,
                            /* lag */ lag,
                            /* verbose */ FALSE,
                            /* reset_stats */ TRUE,
                            /* display_stats */ display_stats,
                            /* display_detailed_stats */ TRUE,
                            /* do_inserts */ TRUE,
                            /* dirty_inserts */ TRUE,
                            /* do_renames */ TRUE,
                            /* rename_to_main_addr */ FALSE,
                            /* do_destroys */ FALSE,
                            /* do_mult_ro_protects */ TRUE,
                            /* dirty_destroys */ TRUE,
                            /* dirty_unprotects */ TRUE);

    if ( show_progress ) /* 6 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* flush and destroy all entries in the cache: */

    flush_cache2(/* cache_ptr */ cache_ptr,
                 /* destroy_entries */ TRUE,
                 /* dump_stats */ FALSE,
                 /* dump_detailed_stats */ FALSE);

    if ( show_progress ) /* 7 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* repeat the exercise with the column major access pattern */
    col_major_scan_forward2(/* cache_ptr */ cache_ptr,
                            /* lag */ lag,
                            /* verbose */ FALSE,
                            /* reset_stats */ TRUE,
                            /* display_stats */ display_stats,
                            /* display_detailed_stats */ TRUE,
                            /* do_inserts */ TRUE,
                            /* dirty_inserts */ TRUE,
                            /* dirty_unprotects */ TRUE);

    if ( show_progress ) /* 8 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    write_permitted2 = FALSE;

    /* NOTE(review): NO_CHANGE for dirty_unprotects presumably means
     * "leave the entry's dirty state untouched" -- verify against the
     * scan helpers in cache2_common.
     */
    col_major_scan_backward2(/* cache_ptr */ cache_ptr,
                             /* lag */ lag,
                             /* verbose */ FALSE,
                             /* reset_stats */ TRUE,
                             /* display_stats */ display_stats,
                             /* display_detailed_stats */ TRUE,
                             /* do_inserts */ FALSE,
                             /* dirty_inserts */ FALSE,
                             /* dirty_unprotects */ NO_CHANGE);

    write_permitted2 = TRUE;

    if ( show_progress ) /* 9 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    takedown_cache2(cache_ptr, display_stats, TRUE);

    if ( show_progress ) /* 10 */
        HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
                  fcn_name, mile_stone++, (int)pass2);

    /* sanity check the test-entry arrays after the cache is gone */
    verify_clean2();
    verify_unprotected2();

    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }

    if ( ! pass2 ) {

        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
                  fcn_name, failure_mssg2);
    }

#else /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

    SKIPPED();

    HDfprintf(stdout, " Clean and dirty LRU lists disabled.\n");

#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

    return;

} /* write_permitted_check() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_insert_entry()
+ *
+ * Purpose: Verify that H5C2_insert_entry behaves as expected.
+ * Test the behaviour with different flags.
+ *
+ * This test was added primarily to test basic insert
+ * pinned entry functionallity, but I through in explicit
+ * tests for other functionallity that is tested implicitly
+ * elsewhere.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/10/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_insert_entry(void)
+{
+ const char * fcn_name = "check_insert_entry";
+ int entry_type = PICO_ENTRY_TYPE;
+ int i;
+ int point = 0;
+ int subpoint = 0;
+ herr_t result;
+ hbool_t in_cache;
+ hbool_t is_dirty;
+ hbool_t is_protected;
+ hbool_t is_pinned;
+ hbool_t show_progress = FALSE;
+ size_t entry_size;
+ H5C2_t * cache_ptr = NULL;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ struct H5C2_cache_entry_t * search_ptr;
+
+
+ TESTING("H5C2_insert_entry() functionality");
+
+ pass2 = TRUE;
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++); /* 0 */
+ }
+
+ /* Allocate a cache, and insert entries into it using all
+ * combinations of flags. Verify that the entries are inserted,
+ * and that the flags have the desired effects.
+ *
+ * Note that the dirty parameter in insert_entry is no longer
+ * used, as we have decided that all inserted entries are
+ * dirty by definition. (Which sounds very reasonable, but didn't
+ * used to be the case.)
+ */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+ (size_t)(1 * 1024 * 1024));
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++); /* 1 */
+ }
+
+ if ( pass2 ) {
+
+ insert_entry2(cache_ptr, entry_type, 0, TRUE, H5C2__NO_FLAGS_SET);
+ insert_entry2(cache_ptr, entry_type, 1, TRUE,
+ H5C2__SET_FLUSH_MARKER_FLAG);
+ insert_entry2(cache_ptr, entry_type, 2, TRUE, H5C2__PIN_ENTRY_FLAG);
+ insert_entry2(cache_ptr, entry_type, 3, TRUE,
+ (H5C2__SET_FLUSH_MARKER_FLAG | H5C2__PIN_ENTRY_FLAG));
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++); /* 2 */
+ }
+
+ /* Verify that the entries are inserted as desired. */
+
+ i = 0;
+ base_addr = entries2[0];
+ while ( ( pass2 ) && ( i < 4 ) )
+ {
+ subpoint = 0;
+
+ entry_ptr = &(base_addr[i]);
+
+ /* Start by checking everything we can via H5C2_get_entry_status() */
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5AC_get_entry_status() reports failure.";
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ if ( pass2 ) {
+
+ /* check the universals */
+ if ( ( ! in_cache ) || ( ! is_dirty ) || ( is_protected ) ||
+ ( entry_size != entry_sizes2[entry_type] ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 1.";
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ if ( pass2 ) {
+
+ /* verify that the pinned flag got set correctly */
+ if ( ( i == 2 ) || ( i == 3 ) ) {
+
+ if ( ! is_pinned ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 2.";
+ }
+ } else if ( is_pinned ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 3.";
+
+ } else if ( is_pinned != ((entry_ptr->header).is_pinned) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 4.";
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ /* Thats all we can get from H5AC_get_entry_status().
+ * Now start looking at the cache data structures directly.
+ */
+
+ if ( pass2 ) {
+
+ /* Verify that the flush marker got set correctly */
+ if ( ( i == 1 ) || ( i == 3 ) ) {
+
+ if ( ! ((entry_ptr->header).flush_marker) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 5.";
+ }
+ } else if ( (entry_ptr->header).flush_marker ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 6.";
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ if ( pass2 ) {
+
+ /* Verify that pinned entries are in the pinned entry list */
+ if ( (entry_ptr->header).is_pinned ) {
+
+ search_ptr = cache_ptr->pel_head_ptr;
+
+ while ( ( search_ptr != NULL ) &&
+ ( search_ptr !=
+ (struct H5C2_cache_entry_t *)entry_ptr ) )
+ {
+ search_ptr = search_ptr->next;
+ }
+
+ if ( search_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 7.";
+ }
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ if ( pass2 ) {
+
+ /* Verify that unpinned entries are in the LRU list */
+ if ( ! ((entry_ptr->header).is_pinned) ) {
+
+ search_ptr = cache_ptr->LRU_head_ptr;
+
+ while ( ( search_ptr != NULL ) &&
+ ( search_ptr !=
+ (struct H5C2_cache_entry_t *)entry_ptr ) )
+ {
+ search_ptr = search_ptr->next;
+ }
+
+ if ( search_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 8.";
+ }
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ if ( pass2 ) {
+
+ /* Verify that unpinned entries are in the dirty LRU list */
+ if ( ! ((entry_ptr->header).is_pinned) ) {
+
+ search_ptr = cache_ptr->dLRU_head_ptr;
+
+ while ( ( search_ptr != NULL ) &&
+ ( search_ptr !=
+ (struct H5C2_cache_entry_t *)entry_ptr ) )
+ {
+ search_ptr = search_ptr->aux_next;
+ }
+
+ if ( search_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 9.";
+ }
+ }
+ }
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s:%d point = %d.%d\n",
+ fcn_name, i, point, subpoint++);
+ }
+
+ i++;
+
+ } /* while */
+
+ if ( show_progress ) {
+ point++;
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++);
+ }
+
+ /* So much for looking at the individual entries. Now verify
+ * that the various counts and sized in the cache header are
+ * as expected.
+ */
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size != 4 * entry_sizes2[entry_type] ) ||
+ ( cache_ptr->slist_len != 4 ) ||
+ ( cache_ptr->slist_size != 4 * entry_sizes2[entry_type] ) ||
+ ( cache_ptr->pl_len != 0 ) ||
+ ( cache_ptr->pl_size != (size_t)0 ) ||
+ ( cache_ptr->pel_len != 2 ) ||
+ ( cache_ptr->pel_size != 2 * entry_sizes2[entry_type] ) ||
+ ( cache_ptr->LRU_list_len != 2 ) ||
+ ( cache_ptr->LRU_list_size != 2 * entry_sizes2[entry_type] ) ||
+#if H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ ( cache_ptr->dLRU_list_len != 2 ) ||
+ ( cache_ptr->dLRU_list_size != 2 * entry_sizes2[entry_type] ) ||
+#endif /* H5C2_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ ( cache_ptr->cLRU_list_len != 0 ) ||
+ ( cache_ptr->cLRU_list_size != (size_t)0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 10.";
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++);
+ }
+
+ /* Finally, if stats collection is enabled, verify that the expected
+ * stats are collected.
+ */
+#if H5C2_COLLECT_CACHE_STATS
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->insertions[entry_type] != 4 ) ||
+ ( cache_ptr->pinned_insertions[entry_type] != 2 ) ||
+ ( cache_ptr->pins[entry_type] != 2 ) ||
+ ( cache_ptr->unpins[entry_type] != 0 ) ||
+ ( cache_ptr->dirty_pins[entry_type] != 0 ) ||
+ ( cache_ptr->max_index_len != 4 ) ||
+ ( cache_ptr->max_index_size != 4 * entry_sizes2[entry_type] ) ||
+ ( cache_ptr->max_slist_len != 4 ) ||
+ ( cache_ptr->max_slist_size != 4 * entry_sizes2[entry_type] ) ||
+ ( cache_ptr->max_pl_len != 0 ) ||
+ ( cache_ptr->max_pl_size != (size_t)0 ) ||
+ ( cache_ptr->max_pel_len != 2 ) ||
+ ( cache_ptr->max_pel_size != 2 * entry_sizes2[entry_type] ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected insert results 11.";
+ }
+ }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++);
+ }
+
+ /* Unpin the pinned entries so we can take down the cache cleanly. */
+
+ if ( pass2 ) {
+
+ unpin_entry2(cache_ptr, entry_type, 2);
+ unpin_entry2(cache_ptr, entry_type, 3);
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++);
+ }
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: point = %d\n", fcn_name, point++);
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_insert_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache()
+ *
+ * Purpose: Verify that flush_cache behaves as expected. In particular,
+ * test the behaviour with different flags.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/10/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache(void)
+{
+ const char * fcn_name = "check_flush_cache";
+ hbool_t show_progress = FALSE;
+ int mile_stone = 0;
+ H5C2_t * cache_ptr = NULL;
+
+ TESTING("H5C2_flush_cache() functionality");
+
+ pass2 = TRUE;
+
+ if ( show_progress ) { /* 0 */
+ HDfprintf(stdout, "\n%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ /* allocate a cache, and flush it under various circumstances.
+ * To the extent possible, verify that the desired actions took
+ * place.
+ */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+ (size_t)(1 * 1024 * 1024));
+ }
+
+ if ( show_progress ) { /* 1 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ /* first test behaviour on an empty cache. Can't do much sanity
+ * checking in this case, so simply check the return values.
+ */
+
+ if ( pass2 ) {
+
+ check_flush_cache__empty_cache(cache_ptr);
+ }
+
+ if ( show_progress ) { /* 2 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ /* now do a series of similar tests with a cache with a single entry.
+ * Start with a clean entry, with no flags set.
+ */
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry(cache_ptr);
+ }
+
+ if ( show_progress ) { /* 3 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__multi_entry(cache_ptr);
+ }
+
+ if ( show_progress ) { /* 4 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__flush_ops(cache_ptr);
+ }
+
+ if ( show_progress ) { /* 5 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( show_progress ) { /* 6 */
+ HDfprintf(stdout, "%s: mile_stone = %d.\n", fcn_name, mile_stone++);
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_flush_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__empty_cache()
+ *
+ * Purpose: Verify that flush_cache behaves as expected with an empty
+ * cache.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/12/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__empty_cache(H5C2_t * cache_ptr)
+{
+ /* const char * fcn_name = "check_flush_cache__empty_cache"; */
+ herr_t result;
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache_ptr NULL on entry to empty cache case.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache not empty at beginning of empty cache case.";
+ }
+
+
+ /* Test behaviour on an empty cache. Can't do much sanity
+ * checking in this case, so simply check the return values.
+ */
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__NO_FLAGS_SET);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "flush with flags = 0x00 failed on empty cache.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "flush with flags = 0x04 failed on empty cache.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_CLEAR_ONLY_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "flush with flags = 0x08 failed on empty cache.\n";
+ }
+ }
+
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "flush with flags = 0x10 failed on empty cache.\n";
+ }
+ }
+
+} /* check_flush_cache__empty_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__multi_entry()
+ *
+ * Purpose: Verify that flush_cache behaves as expected when the cache
+ * contains multiple elements.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/14/05
+ *
+ * Modifications:
+ *
+ * JRM -- 4/5/06
+ * Added pinned entry tests.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__multi_entry(H5C2_t * cache_ptr)
+{
+ /* const char * fcn_name = "check_flush_cache__multi_entry"; */
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache_ptr NULL on entry to multi entry case.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache not empty at beginning of multi entry case.";
+ }
+
+ if ( pass2 )
+ {
+ int test_num = 1;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 2;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 3;
+ unsigned int flush_flags = H5C2__FLUSH_CLEAR_ONLY_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 4;
+ unsigned int flush_flags = H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 5;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 6;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 7;
+ unsigned int flush_flags = H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 8;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ /* verify that all other flags are ignored */
+ if ( pass2 )
+ {
+ int test_num = 9;
+ unsigned int flush_flags = (unsigned)
+ ~(H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG);
+ int spec_size = 8;
+ struct flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+ /* Now do pinned entry tests:
+ *
+ * For the most part, this test is directed at testing the ability
+ * of the flush routine to unravel collections of pinned entries.
+ */
+
+ if ( pass2 )
+ {
+ int test_num = 1;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 8;
+ struct pe_flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 2,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 3,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 4,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 5,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 6,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 7,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ MONSTER_ENTRY_TYPE,
+ -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, 30, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__pe_multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+    if ( pass2 )
+    {
+        int test_num = 2;
+        unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+        int spec_size = 8;
+        struct pe_flush_cache_test_spec spec[8] =
+        {
+          {
+            /* entry_num             = */ 0,
+            /* entry_type            = */ PICO_ENTRY_TYPE,
+            /* entry_index           = */ 100,
+            /* insert_flag           = */ FALSE,
+            /* dirty_flag            = */ FALSE,
+            /* flags                 = */ H5C2__NO_FLAGS_SET,
+            /* num_pins              = */ 0,
+            /* pin_type[MAX_PINS]    = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ TRUE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ FALSE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 1,
+            /* entry_type            = */ PICO_ENTRY_TYPE,
+            /* entry_index           = */ 75,
+            /* insert_flag           = */ FALSE,
+            /* dirty_flag            = */ TRUE,
+            /* flags                 = */ H5C2__NO_FLAGS_SET,
+            /* num_pins              = */ 1,
+            /* pin_type[MAX_PINS]    = */ {PICO_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {100, -1, -1, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ TRUE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 2,
+            /* entry_type            = */ PICO_ENTRY_TYPE,
+            /* entry_index           = */ 25,
+            /* insert_flag           = */ TRUE,
+            /* dirty_flag            = */ FALSE,
+            /* flags                 = */ H5C2__NO_FLAGS_SET,
+            /* num_pins              = */ 2,
+            /* pin_type[MAX_PINS]    = */ {PICO_ENTRY_TYPE,
+                                           PICO_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {100, 75, -1, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ FALSE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 3,
+            /* entry_type            = */ PICO_ENTRY_TYPE,
+            /* entry_index           = */ 50,
+            /* insert_flag           = */ TRUE,
+            /* dirty_flag            = */ TRUE,
+            /* flags                 = */ H5C2__NO_FLAGS_SET,
+            /* num_pins              = */ 3,
+            /* pin_type[MAX_PINS]    = */ {PICO_ENTRY_TYPE,
+                                           PICO_ENTRY_TYPE,
+                                           PICO_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {100, 75, 25, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ FALSE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 4,
+            /* entry_type            = */ MONSTER_ENTRY_TYPE,
+            /* entry_index           = */ 10,
+            /* insert_flag           = */ FALSE,
+            /* dirty_flag            = */ FALSE,
+            /* flags                 = */ H5C2__SET_FLUSH_MARKER_FLAG,
+            /* num_pins              = */ 0,
+            /* pin_type[MAX_PINS]    = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ TRUE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ FALSE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 5,
+            /* entry_type            = */ MONSTER_ENTRY_TYPE,
+            /* entry_index           = */ 20,
+            /* insert_flag           = */ FALSE,
+            /* dirty_flag            = */ TRUE,
+            /* flags                 = */ H5C2__SET_FLUSH_MARKER_FLAG,
+            /* num_pins              = */ 1,
+            /* pin_type[MAX_PINS]    = */ {MONSTER_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1, -1, -1}, /* fixed: "-1 -1" lacked a comma */
+            /* pin_idx[MAX_PINS]     = */ {10, -1, -1, -1, -1, -1, -1, -1}, /* fixed: "-1 -1" lacked a comma */
+            /* expected_deserialized = */ TRUE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 6,
+            /* entry_type            = */ MONSTER_ENTRY_TYPE,
+            /* entry_index           = */ 30,
+            /* insert_flag           = */ TRUE,
+            /* dirty_flag            = */ FALSE,
+            /* flags                 = */ H5C2__SET_FLUSH_MARKER_FLAG,
+            /* num_pins              = */ 2,
+            /* pin_type[MAX_PINS]    = */ {MONSTER_ENTRY_TYPE,
+                                           MONSTER_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {10, 20, -1, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ FALSE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          },
+          {
+            /* entry_num             = */ 7,
+            /* entry_type            = */ MONSTER_ENTRY_TYPE,
+            /* entry_index           = */ 40,
+            /* insert_flag           = */ TRUE,
+            /* dirty_flag            = */ TRUE,
+            /* flags                 = */ H5C2__SET_FLUSH_MARKER_FLAG,
+            /* num_pins              = */ 3,
+            /* pin_type[MAX_PINS]    = */ {MONSTER_ENTRY_TYPE,
+                                           MONSTER_ENTRY_TYPE,
+                                           MONSTER_ENTRY_TYPE,
+                                           -1, -1, -1, -1, -1},
+            /* pin_idx[MAX_PINS]     = */ {10, 20, 30, -1, -1, -1, -1, -1},
+            /* expected_deserialized = */ FALSE,
+            /* expected_cleared      = */ FALSE,
+            /* expected_serialized   = */ TRUE,
+            /* expected_destroyed    = */ TRUE
+          }
+        };
+
+        check_flush_cache__pe_multi_entry_test(cache_ptr, test_num,
+                                               flush_flags, spec_size, spec);
+    }
+
+ if ( pass2 )
+ {
+ int test_num = 3;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG;
+ int spec_size = 8;
+ struct pe_flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__pe_multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 4;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct pe_flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 4,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 4,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ PICO_ENTRY_TYPE,
+ -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__pe_multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+
+ if ( pass2 )
+ {
+ int test_num = 5;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 8;
+ struct pe_flush_cache_test_spec spec[8] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 0,
+ /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 75,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 25,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ TRUE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 30,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ FALSE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ MONSTER_ENTRY_TYPE,
+ /* entry_index = */ 40,
+ /* insert_flag = */ TRUE,
+ /* dirty_flag = */ TRUE,
+ /* flags = */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* num_pins = */ 1,
+ /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE,
+ -1, -1, -1, -1, -1, -1, -1},
+ /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1},
+ /* expected_deserialized = */ FALSE,
+ /* expected_cleared = */ TRUE,
+ /* expected_serialized = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__pe_multi_entry_test(cache_ptr, test_num,
+ flush_flags, spec_size, spec);
+ }
+
+ return;
+
+} /* check_flush_cache__multi_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__multi_entry_test()
+ *
+ * Purpose: Run a multi entry flush cache test.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/13/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__multi_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct flush_cache_test_spec spec[])
+{
+ /* const char * fcn_name = "check_flush_cache__multi_entry_test"; */
+ static char msg[128];
+ herr_t result;
+ int i;
+ size_t total_entry_size = 0;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+#if 0 /* JRM */
+ /* This gets used a lot, so lets leave it in. */
+
+ HDfprintf(stdout, "check_flush_cache__multi_entry_test: test %d\n",
+ test_num);
+#endif /* JRM */
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "cache_ptr NULL on entry to single entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+
+ HDsnprintf(msg, (size_t)128,
+ "cache not empty at beginning of multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( spec_size < 1 ) || ( spec == NULL ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "missing/bad test spec on entry to multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( ( spec[i].entry_num != i ) ||
+ ( spec[i].entry_type < 0 ) ||
+ ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+ ( spec[i].entry_index < 0 ) ||
+ ( spec[i].entry_index > max_indices2[spec[i].entry_type] ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "bad data in spec[%d] on entry to multi entry test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( spec[i].insert_flag ) {
+
+ insert_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index,
+ spec[i].dirty_flag, spec[i].flags);
+
+ } else {
+
+ protect_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index);
+
+ unprotect_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index,
+ (int)(spec[i].dirty_flag), spec[i].flags);
+ }
+
+ total_entry_size += entry_sizes2[spec[i].entry_type];
+
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ flush_flags);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "flush with flags 0x%x failed in multi entry test #%d.",
+ flush_flags, test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared != spec[i].expected_cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#else
+ /* When in procduction mode, the clear_dirty_bits() callback is
+ * not called, so entry_ptr->cleared should never be set.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#endif /* NDEBUG */
+
+#if 0 /* This is useful debugging code. Lets keep it around. */
+
+ HDfprintf(stdout,
+ "deslzd = %d(%d), clrd = %d(%d), slzd = %d(%d), dest = %d(%d)\n",
+ (int)(entry_ptr->deserialized),
+ (int)(spec[i].expected_deserialized),
+ (int)(entry_ptr->cleared),
+ (int)(spec[i].expected_cleared),
+ (int)(entry_ptr->serialized),
+ (int)(spec[i].expected_serialized),
+ (int)(entry_ptr->destroyed),
+ (int)(spec[i].expected_destroyed));
+
+#endif
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Bad status on entry %d after flush in multi entry test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ if ( ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) == 0 )
+ &&
+ ( ( cache_ptr->index_len != spec_size )
+ ||
+ ( cache_ptr->index_size != total_entry_size )
+ )
+ )
+ ||
+ ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 )
+ &&
+ ( ( cache_ptr->index_len != 0 )
+ ||
+ ( cache_ptr->index_size != 0 )
+ )
+ )
+ ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after flush in multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ /* clean up the cache to prep for the next test */
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Flush failed on cleanup in multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after cleanup in multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->cleared = FALSE;
+ entry_ptr->serialized = FALSE;
+ entry_ptr->destroyed = FALSE;
+
+ i++;
+ }
+
+ return;
+
+} /* check_flush_cache__multi_entry_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__pe_multi_entry_test()
+ *
+ * Purpose: Run a multi entry flush cache test.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/5/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__pe_multi_entry_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct pe_flush_cache_test_spec spec[])
+{
+ /* const char * fcn_name = "check_flush_cache__pe_multi_entry_test"; */
+ static char msg[128];
+ herr_t result;
+ int i;
+ int j;
+ size_t total_entry_size = 0;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+#if 0 /* JRM */
+ /* This is useful debugging code. Leave it in for now. */
+
+ HDfprintf(stdout, "check_flush_cache__pe_multi_entry_test: test %d\n",
+ test_num);
+#endif /* JRM */
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "cache_ptr NULL on entry to pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+
+ HDsnprintf(msg, (size_t)128,
+ "cache not empty at beginning of pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( spec_size < 1 ) || ( spec == NULL ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "missing/bad test spec on entry to pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( ( spec[i].entry_num != i ) ||
+ ( spec[i].entry_type < 0 ) ||
+ ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+ ( spec[i].entry_index < 0 ) ||
+ ( spec[i].entry_index > max_indices2[spec[i].entry_type] ) ||
+ ( spec[i].num_pins < 0 ) ||
+ ( spec[i].num_pins > MAX_PINS ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "bad data in spec[%d] on entry to pe multi entry test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( spec[i].insert_flag ) {
+
+ insert_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index,
+ spec[i].dirty_flag, spec[i].flags);
+
+ } else {
+
+ protect_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index);
+
+ unprotect_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index,
+ (int)(spec[i].dirty_flag), spec[i].flags);
+ }
+
+ total_entry_size += entry_sizes2[spec[i].entry_type];
+
+ for ( j = 0; j < spec[i].num_pins; j++ )
+ {
+ create_pinned_entry_dependency2(cache_ptr,
+ spec[i].entry_type,
+ spec[i].entry_index,
+ spec[i].pin_type[j],
+ spec[i].pin_idx[j]);
+ }
+
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ flush_flags);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "flush with flags 0x%x failed in pe multi entry test #%d.",
+ flush_flags, test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared != spec[i].expected_cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#else
+ /* When in procduction mode, the clear_dirty_bits() callback is
+ * not called, so entry_ptr->cleared should never be set.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#endif /* NDEBUG */
+
+#if 0 /* This is useful debugging code. Lets keep it around. */
+
+ HDfprintf(stdout,
+ "desrlzd = %d(%d), clrd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n",
+ (int)(entry_ptr->deserialized),
+ (int)(spec[i].expected_deserialized),
+ (int)(entry_ptr->cleared),
+ (int)(spec[i].expected_cleared),
+ (int)(entry_ptr->serialized),
+ (int)(spec[i].expected_serialized),
+ (int)(entry_ptr->destroyed),
+ (int)(spec[i].expected_destroyed));
+
+#endif
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Bad status on entry %d after flush in pe multi entry test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ if ( ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) == 0 )
+ &&
+ ( ( cache_ptr->index_len != spec_size )
+ ||
+ ( cache_ptr->index_size != total_entry_size )
+ )
+ )
+ ||
+ ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 )
+ &&
+ ( ( cache_ptr->index_len != 0 )
+ ||
+ ( cache_ptr->index_size != 0 )
+ )
+ )
+ ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after flush in pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ /* clean up the cache to prep for the next test */
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Flush failed on cleanup in pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after cleanup in pe multi entry test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->cleared = FALSE;
+ entry_ptr->serialized = FALSE;
+ entry_ptr->destroyed = FALSE;
+
+ i++;
+ }
+
+ return;
+
+} /* check_flush_cache__pe_multi_entry_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__flush_ops()
+ *
+ * Purpose: Run the flush ops cache tests.
+ *
+ * These are tests that test the cache's ability to handle
+ * the case in which the flush callback dirties, resizes,
+ * and/or renames entries.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/3/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__flush_ops(H5C2_t * cache_ptr)
+{
+ /* const char * fcn_name = "check_flush_cache__flush_ops"; */
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache_ptr NULL on entry to flush ops test.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache not empty at beginning of flush ops test.";
+ }
+
+ if ( pass2 ) /* test #1 */
+ {
+ /* start with a very simple test, in which there are two entries
+ * resident in cache, and the second entry dirties the first in
+ * the flush callback. No size changes, and no flush flags.
+ */
+ int test_num = 1;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 2;
+ int init_expected_index_len = 2;
+ size_t init_expected_index_size = 2 * PICO_ENTRY_SIZE;
+ int expected_index_len = 2;
+ size_t expected_index_size = 2 * PICO_ENTRY_SIZE;
+ struct fo_flush_cache_test_spec spec[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ 0,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #2 */
+ {
+ /* Same as test 1, only this time set the flush invalidate flag.
+ * Note that we must repeat all tests with the flush invalidate flag
+ * as this triggers a different set of code to execute the flush.
+ *
+ * Create two entries resident in cache, and have the second entry
+ * dirty the first in the flush callback.
+ */
+ int test_num = 2;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 2;
+ int init_expected_index_len = 2;
+ size_t init_expected_index_size = 2 * PICO_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = 0;
+ struct fo_flush_cache_test_spec spec[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE,0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #3 */
+ {
+ /* Single entry test verifying that the cache can handle the case in
+ * which the call back function resizes the entry for which it has
+ * been called.
+ */
+ int test_num = 3;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 4;
+ int expected_index_len = 1;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #4 */
+ {
+        /* Repeat test #3 with the flush invalidate flag.
+ *
+ * Single entry test verifying that the cache can handle the case in
+ * which the call back function resizes the entry for which it has
+ * been called.
+ */
+ int test_num = 4;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 4;
+ int expected_index_len = 0;
+ size_t expected_index_size = 0;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #5 & #6 */
+ {
+ /* Single entry test verifying that the cache can handle the case in
+ * which the call back function first resizes and then renames the
+ * entry for which it has been called.
+ *
+ * Run this entry twice, as the first run moves the entry to its
+ * alternate address, and the second moves it back.
+ *
+ * 10/8/07 -- JRM
+ * Added a resize operation to this test to satisfy the new
+	 * 	   requirement that any rename of an entry on flush will always
+	 * 	   be accompanied by a resize.  Note that as a result, this
+ * test becomes redundant with later tests.
+ */
+ int test_num = 5; /* and 6 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 1;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the rename to move the target entry back to its
+ * main address. The first test moved it to its alternate address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entry is moved forward in the slist. In the second
+ * it is moved backwards.
+ *
+ * Since there is only one entry in the cache, this doesn't really
+ * matter in this case. But we will do similar tests later with
+ * other entries in the cache.
+ */
+ if ( pass2 ) {
+
+ spec[0].flush_ops[1].flag = TRUE;
+ test_num = 6;
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #7 & #8 */
+ {
+ /* Run tests 5 & 6 again, using the flush invalidate flag on the
+ * second test.
+ *
+ * Single entry test verifying that the cache can handle the case in
+ * which the call back function renames the entry for which it has
+ * been called.
+ *
+ * Run this entry twice, as the first run moves the entry to its
+ * alternate address, and the second moves it back.
+ *
+ * 10/8/07 -- JRM
+ * Added a resize operation to this test to satisfy the new
+	 * 	   requirement that any rename of an entry on flush will always
+	 * 	   be accompanied by a resize.  Note that as a result, this
+ * test becomes redundant with later tests.
+ */
+ int test_num = 7; /* and 8 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 1;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the rename to move the target entry back to its
+ * main address. The first test moved it to its alternate address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entry is moved forward in the slist. In the second
+ * it is moved backwards.
+ *
+ * Since there is only one entry in the cache, this doesn't really
+ * matter in this case. But we will do similar tests later with
+ * other entries in the cache.
+ */
+
+ if ( pass2 ) {
+
+ test_num = 8;
+ flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ expected_index_len = 0;
+ expected_index_size = 0;
+ spec[0].flush_ops[1].flag = TRUE;
+ spec[0].expected_destroyed = TRUE;
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #9 & #10 */
+ {
+ /* Single entry test verifying that the cache can handle the case in
+ * which the call back function both resizes and renames the entry
+ * for which it has been called.
+ *
+ * Again, we run this entry twice, as the first run moves the entry
+ * to its alternate address, and the second moves it back.
+ */
+ int test_num = 9; /* and 10 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 2;
+ int expected_index_len = 1;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE / 4;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the rename to move the target entry back to its
+ * main address. The first test moved it to its alternate address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entry is moved forward in the slist. In the second
+ * it is moved backwards.
+ *
+ * Since there is only one entry in the cache, this doesn't really
+ * matter in this case. But we will do similar tests later with
+ * other entries in the cache.
+ */
+ if ( pass2 ) {
+
+ spec[0].flush_ops[1].flag = TRUE;
+ test_num = 10;
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #11 & #12 */
+ {
+ /* Repeat the previous test with the flush invalidate flag on the
+ * second test.
+ *
+ * Single entry test verifying that the cache can handle the case in
+ * which the call back function both resizes and renames the entry
+ * for which it has been called.
+ *
+ * Again, we run this entry twice, as the first run moves the entry to its
+ * alternate address, and the second moves it back.
+ */
+ int test_num = 11; /* and 12 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 2;
+ int expected_index_len = 1;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE / 4;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the rename to move the target entry back to its
+ * main address. The first test moved it to its alternate address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entry is moved forward in the slist. In the second
+ * it is moved backwards.
+ *
+ * Since there is only one entry in the cache, this doesn't really
+ * matter in this case. But we will do similar tests later with
+ * other entries in the cache.
+ */
+ if ( pass2 ) {
+
+ test_num = 12;
+ flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ expected_index_len = 0;
+ expected_index_size = 0;
+ spec[0].flush_ops[1].flag = TRUE;
+ spec[0].expected_destroyed = TRUE;
+
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #13 */
+ {
+ /* Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * dirties two entries that are not in cache. No size
+ * changes.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ int test_num = 13;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * PICO_ENTRY_SIZE;
+ int expected_index_len = 3;
+ size_t expected_index_size = 3 * PICO_ENTRY_SIZE;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, 0, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, 0, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ PICO_ENTRY_SIZE,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ PICO_ENTRY_SIZE,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #14 */
+ {
+ /* Repeat previous test with the flush invalidate flag.
+ *
+ * Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * dirties two entries that are not in cache. No size
+ * changes.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ int test_num = 14;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * PICO_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = (size_t)0;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 2,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, 0, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, 0, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ PICO_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ PICO_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #15 */
+ {
+ /* Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * resizes and dirties two entries that are not in cache.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ int test_num = 15;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 3;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE +
+ (VARIABLE_ENTRY_SIZE / 4) +
+ (VARIABLE_ENTRY_SIZE / 2);
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #16 */
+ {
+ /* Repeat previous test with the flush invalidate flag.
+ *
+ * Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * resizes and dirties two entries that are not in cache.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ int test_num = 16;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = (size_t)0;
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #17 & #18 */
+ {
+ /* Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * resizes, dirties, and renames two entries that are not in cache.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ int test_num = 17; /* and 18 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 3;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE +
+ (VARIABLE_ENTRY_SIZE / 4) +
+ (VARIABLE_ENTRY_SIZE / 2);
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the renames to move the target entries back to
+ * their main address. The first test moved them to their alternate
+ * address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entries are moved forward in the slist. In the second
+ * they are moved backwards.
+ */
+ if ( pass2 ) {
+
+ test_num = 18;
+ spec[0].flush_ops[2].flag = TRUE;
+ spec[0].flush_ops[5].flag = TRUE;
+ checks[0].at_main_addr = TRUE;
+ checks[1].at_main_addr = TRUE;
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #19 & #20 */
+ {
+ /* Repeat the above test with the flush invalidate flag on the
+ * second test.
+ *
+ * Test the ability of the cache to handle the case in which
+ * the flush function of an entry that is resident in cache
+ * resizes, dirties, and renames two entries that are not in cache.
+ *
+ * At present, I am assured that this case will never occur, but
+ * lets make sure we can handle it regardless.
+ */
+ /* Test #19 flushes with no flags set: the single resident variable
+ * entry's flush callback drags entries 0 and 2 into the index at
+ * their reduced sizes, so the index grows from 1 to 3 entries.
+ * Test #20 re-runs the same spec (mutated below) with
+ * H5C2__FLUSH_INVALIDATE_FLAG, so the index must end up empty and
+ * every entry -- including the two rename targets -- destroyed.
+ */
+ int test_num = 19; /* and 20 */
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 1;
+ int init_expected_index_len = 1;
+ size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 3;
+ size_t expected_index_size = VARIABLE_ENTRY_SIZE +
+ (VARIABLE_ENTRY_SIZE / 4) +
+ (VARIABLE_ENTRY_SIZE / 2);
+ struct fo_flush_cache_test_spec spec[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 2;
+ struct fo_flush_entry_check checks[2] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ /* this change forces the renames to move the target entries back to
+ * their main address. The first test moved them to their alternate
+ * address.
+ *
+ * Note that these two tests are not the same, as in the first test,
+ * the renamed entries are moved forward in the slist. In the second
+ * they are moved backwards.
+ */
+ if ( pass2 ) {
+
+ test_num = 20;
+ flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ expected_index_len = 0;
+ expected_index_size = (size_t)0;
+ spec[0].expected_destroyed = TRUE;
+ spec[0].flush_ops[2].flag = TRUE;
+ spec[0].flush_ops[5].flag = TRUE;
+ checks[0].at_main_addr = TRUE;
+ checks[0].in_cache = FALSE;
+ checks[0].expected_destroyed = TRUE;
+ checks[1].at_main_addr = TRUE;
+ checks[1].in_cache = FALSE;
+ checks[1].expected_destroyed = TRUE;
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+ }
+
+ if ( pass2 ) /* test #21 */
+ {
+ /* Now mix things up a bit.
+ *
+ * Load several entries, two of which have flush functions that
+ * resize, dirty, and rename two entries that are not in the
+ * cache. Mark only one of these entries, and then flush the
+ * cache with the flush marked entries flag.
+ *
+ * This is the only test in which we test the
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG. The hope is that since
+ * we test the two features extensively by themselves, so
+ * it should be sufficient to verify that they play together
+ * as expected.
+ */
+ /* Only entries 0 and 2 of the spec carry
+ * H5C2__SET_FLUSH_MARKER_FLAG, so only their flush callbacks run.
+ * Hence checks[0]/checks[1] (targets of the marked entry's ops)
+ * must appear in the index dirty at their new sizes, while
+ * checks[2]/checks[3] (targets of the UNmarked entry 11's ops)
+ * must remain untouched and not loaded.
+ */
+ int test_num = 21;
+ unsigned int flush_flags = H5C2__FLUSH_MARKED_ENTRIES_FLAG;
+ int spec_size = 4;
+ int init_expected_index_len = 4;
+ size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (2 * PICO_ENTRY_SIZE);
+ int expected_index_len = 6;
+ size_t expected_index_size = (2 * VARIABLE_ENTRY_SIZE) +
+ (VARIABLE_ENTRY_SIZE / 4) +
+ (VARIABLE_ENTRY_SIZE / 2) +
+ (2 * PICO_ENTRY_SIZE);
+ struct fo_flush_cache_test_spec spec[4] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SET_FLUSH_MARKER_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 11,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 10, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 12, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SET_FLUSH_MARKER_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 4;
+ struct fo_flush_entry_check checks[4] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ TRUE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ TRUE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 12,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ /* restore all entries to their default state for the next test */
+ reset_entries2();
+ }
+
+ if ( pass2 ) /* test #22 */
+ {
+ /* Mix things up some more.
+ *
+ * Load lots of entries, some of which have flush functions that
+ * resize, dirty, and rename two entries that are not in the
+ * cache.
+ *
+ * Also load entries that have flush ops on entries that are in
+ * cache.
+ */
+ /* Flush with no flags set: both variable entries' flush callbacks
+ * pull their two (uncached) targets into the index at reduced
+ * sizes, growing the index from 6 to 10 entries. The two pico
+ * entries at indices 10 and 20 each re-dirty pico entry 0, which
+ * is already resident, so the index length does not change on
+ * their account.
+ */
+ int test_num = 22;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 6;
+ int init_expected_index_len = 6;
+ size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (4 * PICO_ENTRY_SIZE);
+ int expected_index_len = 10;
+ size_t expected_index_size = (2 * VARIABLE_ENTRY_SIZE) +
+ (2 * (VARIABLE_ENTRY_SIZE / 4)) +
+ (2 * (VARIABLE_ENTRY_SIZE / 2)) +
+ (4 * PICO_ENTRY_SIZE);
+ struct fo_flush_cache_test_spec spec[6] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 11,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 10, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 12, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 4;
+ struct fo_flush_entry_check checks[4] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 12,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ /* restore all entries to their default state for the next test */
+ reset_entries2();
+ }
+
+ if ( pass2 ) /* test #23 */
+ {
+ /* Repeat test #22 with the flush invalidate flag set.
+ *
+ * Mix things up some more.
+ *
+ * Load lots of entries, some of which have flush functions that
+ * resize, dirty, and rename two entries that are not in the
+ * cache.
+ *
+ * Also load entries that have flush ops on entries that are in
+ * cache.
+ */
+ /* With H5C2__FLUSH_INVALIDATE_FLAG the index must be empty after
+ * the flush, and every spec entry and every flush-op target
+ * (checks[0..3]) is expected to have been destroyed.
+ */
+ int test_num = 23;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 6;
+ int init_expected_index_len = 6;
+ size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (4 * PICO_ENTRY_SIZE);
+ int expected_index_len = 0;
+ size_t expected_index_size = 0;
+ struct fo_flush_cache_test_spec spec[6] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 11,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 6,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 10, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 10, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 12, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 12, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 1,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 20,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 4;
+ struct fo_flush_entry_check checks[4] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 0,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 10,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 12,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ /* restore all entries to their default state for the next test */
+ reset_entries2();
+ }
+
+ /* So much for tests involving only flush operations.
+ *
+ * Now create some tests mixing flush ops and pins.
+ */
+ if ( pass2 ) /* test #24 */
+ {
+ /* Pico entries 50 and 150 pin pico entry 100, and also dirty
+ * pico entry 100 on flush.
+ */
+ /* First pin/flush-op combination test. Entry 150 is inserted
+ * (not loaded), so its expected_loaded is FALSE. The flush with
+ * no flags set must leave all three entries resident, hence the
+ * index length/size are unchanged and no extra checks are needed
+ * (check_size == 0; checks[] is a placeholder).
+ */
+ int test_num = 24;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 3;
+ int init_expected_index_len = 3;
+ size_t init_expected_index_size = 3 * PICO_ENTRY_SIZE;
+ int expected_index_len = 3;
+ size_t expected_index_size = 3 * PICO_ENTRY_SIZE;
+ struct fo_flush_cache_test_spec spec[3] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 1,
+ /* pin_type = */ {PICO_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 150,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 1,
+ /* pin_type = */ {PICO_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #25 */
+ {
+ /* Repeat the previous test with the flush invalidate flag.
+ *
+ * Pico entries 50 and 150 pin pico entry 100, and also dirty
+ * pico entry 100 on flush.
+ */
+ int test_num = 25;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 3;
+ int init_expected_index_len = 3;
+ size_t init_expected_index_size = 3 * PICO_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = (size_t)0;
+ struct fo_flush_cache_test_spec spec[3] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 50,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 1,
+ /* pin_type = */ {PICO_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ PICO_ENTRY_TYPE,
+ /* entry_index = */ 150,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 1,
+ /* pin_type = */ {PICO_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 1,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, PICO_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__DIRTY, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ (size_t)0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+ }
+
+ if ( pass2 ) /* test #26 */
+ {
+ /* This one is complex.
+ *
+ * In the following overview table, VET stands for
+ * VARIABLE_ENTRY_TYPE.
+ *
+ * In trying to follow what happens when we flush the
+ * set of entries constructed below, recall that each
+ * flush operation is executed the first time the
+ * entry is flushed, and then not executed again.
+ * This may be a weakness in the tests, but that
+ * is the way it is for now.
+ *
+ * After thinking about it for a while, I'm not sure that
+ * the interaction between pins and flush operations needs
+ * all that much testing, as the two are essentially
+ * orthogonal. Thus this is a bit of a smoke check to
+ * verify that we get the expected results.
+ *
+ * (VET, 100) initially not resident in cache
+ *
+ * (VET, 200) initially clean and resident in cache
+ *
+ * (VET, 300) initially not resident in cache
+ *
+ * (VET, 2100) initially clean and resident in cache
+ *
+ * (VET, 2200) initially not resident in cache
+ *
+ * (VET, 2300) initially clean and resident in cache
+ *
+ * (VET, 1000) initially clean, and in cache
+ * dirties (VET, 100)
+ * resizes (VET, 200)
+ * dirty (VET, 300) -- dirty first to bring into cache.
+ * renames (VET, 300)
+ *
+ * (VET, 2000) initially clean, and in cache
+ * dirties (VET, 2100)
+ * resizes (VET, 2200)
+ * renames (VET, 2300)
+ *
+ * (VET, 350) initially clean, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 350)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 450) initially dirty, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * renames (VET, 450)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 650) initially clean, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 650)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 750) initially dirty, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 750)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 500) initially dirty, and in cache
+ * dirties (VET, 350)
+ * dirties (VET, 450)
+ * dirties (VET, 650)
+ * dirties (VET, 750)
+ */
+ int test_num = 26;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 10;
+ int init_expected_index_len = 10;
+ size_t init_expected_index_size = 10 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 13;
+ size_t expected_index_size = 9 * VARIABLE_ENTRY_SIZE;
+ struct fo_flush_cache_test_spec spec[10] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 200,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2300,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1000,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 200, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2000,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2200, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 350,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 350, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 450,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 450, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 650,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 650, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 8,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 750,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 750, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 9,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 500,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 350, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 450, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 650, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 750, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 3;
+ struct fo_flush_entry_check checks[3] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 300,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2200,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ TRUE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ reset_entries2();
+ }
+
+ if ( pass2 ) /* test #27 */
+ {
+ /* Repeat test #26 with the flush invalidate flag.
+ *
+ * In the following overview table, VET stands for
+ * VARIABLE_ENTRY_TYPE.
+ *
+ * In trying to follow what happens when we flush the
+ * set of entries constructed below, recall that each
+ * flush operation is executed the first time the
+ * entry is flushed, and then not executed again.
+ * This may be a weakness in the tests, but that
+ * is the way it is for now.
+ *
+ * After thinking about it for a while, I'm not sure that
+ * the interaction between pins and flush operations needs
+ * all that much testing, as the two are essentially
+ * orthogonal. The big thing is to verify that flushes of
+ * pinned entries with flush ops result in the expected
+ * updates of the cache.
+ *
+ * Thus this is a bit of a smoke check to verify that we
+ * get the expected results.
+ *
+ * (VET, 100) initially not resident in cache
+ *
+ * (VET, 200) initially clean and resident in cache
+ *
+ * (VET, 300) initially not resident in cache
+ *
+ * (VET, 2100) initially clean and resident in cache
+ *
+ * (VET, 2200) initially not resident in cache
+ *
+ * (VET, 2300) initially clean and resident in cache
+ *
+ * (VET, 1000) initially clean, and in cache
+ * dirties (VET, 100)
+ * resizes (VET, 200)
+ * dirty (VET, 300) -- dirty first to bring into cache.
+ * renames (VET, 300)
+ *
+ * (VET, 2000) initially clean, and in cache
+ * dirties (VET, 2100)
+ * resizes (VET, 2200)
+ * renames (VET, 2300)
+ *
+ * (VET, 350) initially clean, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 350)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 450) initially dirty, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * renames (VET, 450)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 650) initially clean, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 650)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 750) initially dirty, and in cache
+ * pins (VET, 1000)
+ * dirties (VET, 1000)
+ * resizes (VET, 750)
+ * pins (VET, 2000)
+ * dirties (VET, 2000)
+ *
+ * (VET, 500) initially dirty, and in cache
+ * dirties (VET, 350)
+ * dirties (VET, 450)
+ * dirties (VET, 650)
+ * dirties (VET, 750)
+ */
+ int test_num = 27;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 10;
+ int init_expected_index_len = 10;
+ size_t init_expected_index_size = 10 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = (size_t)0;
+ struct fo_flush_cache_test_spec spec[10] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 200,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2300,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 1000,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 200, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2000,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 2200, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 2300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 5,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 350,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 350, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 6,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 450,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 450, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 7,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 650,
+ /* insert_flag = */ TRUE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 650, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 8,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 750,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 2,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {1000, 2000, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 1000, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 2000, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 750, FALSE, VARIABLE_ENTRY_SIZE / 4 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 9,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 500,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 4,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 350, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 450, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 650, FALSE, 0 },
+ { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 750, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 3;
+ struct fo_flush_entry_check checks[3] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 300,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 2200,
+ /* expected_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ TRUE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ reset_entries2();
+ }
+
+ if ( pass2 ) /* test #28 */
+ {
+ /* Test the expected fheap case, in which an entry dirties
+ * and resizes itself, and dirties an entry which it has
+ * pinned.
+ */
+ int test_num = 28;
+ unsigned int flush_flags = H5C2__NO_FLAGS_SET;
+ int spec_size = 5;
+ int init_expected_index_len = 5;
+ size_t init_expected_index_size = 3 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 5;
+ size_t expected_index_size = 4 * VARIABLE_ENTRY_SIZE;
+ struct fo_flush_cache_test_spec spec[5] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 200,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 200, FALSE, VARIABLE_ENTRY_SIZE },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 200, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 300,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {400, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 400, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 300, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 400,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 500,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 500, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 500, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ 0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ reset_entries2();
+ }
+
+ if ( pass2 ) /* test #29 */
+ {
+ /* Repeat test #28 with the flush invalidate flag.
+ *
+ * Test the expected fheap case, in which an entry dirties
+ * and resizes itself, and dirties an entry which it has
+ * pinned.
+ */
+ int test_num = 29;
+ unsigned int flush_flags = H5C2__FLUSH_INVALIDATE_FLAG;
+ int spec_size = 5;
+ int init_expected_index_len = 5;
+ size_t init_expected_index_size = 3 * VARIABLE_ENTRY_SIZE;
+ int expected_index_len = 0;
+ size_t expected_index_size = 0;
+ struct fo_flush_cache_test_spec spec[5] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 100,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 1,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 200,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 2,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 200, FALSE, VARIABLE_ENTRY_SIZE },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 200, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 2,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 300,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {400, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 400, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 300, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 300, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 3,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 400,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__NO_FLAGS_SET,
+ /* new_size = */ 0,
+ /* num_pins = */ 0,
+ /* pin_type = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 0,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ },
+ {
+ /* entry_num = */ 4,
+ /* entry_type = */ VARIABLE_ENTRY_TYPE,
+ /* entry_index = */ 500,
+ /* insert_flag = */ FALSE,
+ /* flags = */ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ /* new_size = */ VARIABLE_ENTRY_SIZE / 4,
+ /* num_pins = */ 1,
+ /* pin_type = */ {VARIABLE_ENTRY_TYPE, 0, 0, 0, 0, 0, 0, 0},
+ /* pin_idx = */ {100, 0, 0, 0, 0, 0, 0, 0},
+ /* num_flush_ops = */ 3,
+ /* flush_ops = */
+ /* op_code: type: idx: flag: size: */
+ { { FLUSH_OP__DIRTY, VARIABLE_ENTRY_TYPE, 100, FALSE, 0 },
+ { FLUSH_OP__RESIZE, VARIABLE_ENTRY_TYPE, 500, FALSE, VARIABLE_ENTRY_SIZE / 2 },
+ { FLUSH_OP__RENAME, VARIABLE_ENTRY_TYPE, 500, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 },
+ { FLUSH_OP__NO_OP, 0, 0, FALSE, 0 } },
+ /* expected_loaded = */ TRUE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ TRUE,
+ /* expected_destroyed = */ TRUE
+ }
+ };
+ int check_size = 0;
+ struct fo_flush_entry_check checks[1] =
+ {
+ {
+ /* entry_num = */ 0,
+ /* entry_type = */ 0,
+ /* entry_index = */ 0,
+ /* expected_size = */ 0,
+ /* in_cache = */ FALSE,
+ /* at_main_addr = */ FALSE,
+ /* is_dirty = */ FALSE,
+ /* is_protected = */ FALSE,
+ /* is_pinned = */ FALSE,
+ /* expected_loaded = */ FALSE,
+ /* expected_cleared = */ FALSE,
+ /* expected_flushed = */ FALSE,
+ /* expected_destroyed = */ FALSE
+ }
+ };
+
+ check_flush_cache__flush_op_test(cache_ptr,
+ test_num,
+ flush_flags,
+ spec_size,
+ spec,
+ init_expected_index_len,
+ init_expected_index_size,
+ expected_index_len,
+ expected_index_size,
+ check_size,
+ checks);
+
+ reset_entries2();
+ }
+
+ /* finally finish up with the flush ops eviction test */
+ check_flush_cache__flush_op_eviction_test(cache_ptr);
+
+ return;
+
+} /* check_flush_cache__flush_ops() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__flush_op_test()
+ *
+ * Purpose: Run a flush op flush cache test. Of the nature of
+ * flush operations, this is a multi-entry test.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/3/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__flush_op_test(H5C2_t * cache_ptr,
+ int test_num,
+ unsigned int flush_flags,
+ int spec_size,
+ struct fo_flush_cache_test_spec spec[],
+ int init_expected_index_len,
+ size_t init_expected_index_size,
+ int expected_index_len,
+ size_t expected_index_size,
+ int check_size,
+ struct fo_flush_entry_check check[])
+{
+ const char * fcn_name = "check_flush_cache__flush_op_test";
+ static char msg[128];
+ herr_t result;
+ int i;
+ int j;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+#if 0 /* This is useful debugging code -- lets keep it around. */
+ HDfprintf(stdout, "check_flush_cache__flush_op_test: test %d\n",
+ test_num);
+#endif
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "cache_ptr NULL on entry to flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+#if 0 /* JRM */
+ HDfprintf(stdout, "%s:(1) index_len = %ld, index_size = %ld.\n",
+ fcn_name, (long)(cache_ptr->index_len),
+ (long)(cache_ptr->index_size));
+#endif /* JRM */
+ pass2 = FALSE;
+
+ HDsnprintf(msg, (size_t)128,
+ "cache not empty at beginning of flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( spec_size < 1 ) || ( spec == NULL ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "missing/bad test spec on entry to flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( ( spec[i].entry_num != i ) ||
+ ( spec[i].entry_type < 0 ) ||
+ ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+ ( spec[i].entry_index < 0 ) ||
+ ( spec[i].entry_index > max_indices2[spec[i].entry_type] ) ||
+ ( spec[i].num_pins < 0 ) ||
+ ( spec[i].num_pins > MAX_PINS ) ||
+ ( spec[i].num_flush_ops < 0 ) ||
+ ( spec[i].num_flush_ops > MAX_FLUSH_OPS ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "bad data in spec[%d] on entry to flush op test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < check_size ) )
+ {
+ if ( ( check[i].entry_num != i ) ||
+ ( check[i].entry_type < 0 ) ||
+ ( check[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+ ( check[i].entry_index < 0 ) ||
+ ( check[i].entry_index > max_indices2[check[i].entry_type] ) ||
+ ( check[i].expected_size <= (size_t)0 ) ||
+ ( ( check[i].in_cache != TRUE ) &&
+ ( check[i].in_cache != FALSE ) ) ||
+ ( ( check[i].at_main_addr != TRUE ) &&
+ ( check[i].at_main_addr != FALSE ) ) ||
+ ( ( check[i].is_dirty != TRUE ) &&
+ ( check[i].is_dirty != FALSE ) ) ||
+ ( ( check[i].is_protected != TRUE ) &&
+ ( check[i].is_protected != FALSE ) ) ||
+ ( ( check[i].is_pinned != TRUE ) &&
+ ( check[i].is_pinned != FALSE ) ) ||
+ ( ( check[i].expected_deserialized != TRUE ) &&
+ ( check[i].expected_deserialized != FALSE ) ) ||
+ ( ( check[i].expected_cleared != TRUE ) &&
+ ( check[i].expected_cleared != FALSE ) ) ||
+ ( ( check[i].expected_serialized != TRUE ) &&
+ ( check[i].expected_serialized != FALSE ) ) ||
+ ( ( check[i].expected_destroyed != TRUE ) &&
+ ( check[i].expected_destroyed != FALSE ) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "bad data in check[%d] on entry to flush op test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ if ( spec[i].insert_flag ) {
+
+ insert_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index,
+ TRUE, spec[i].flags);
+
+ } else {
+
+ protect_entry2(cache_ptr, spec[i].entry_type, spec[i].entry_index);
+
+ unprotect_entry_with_size_change2(cache_ptr, spec[i].entry_type,
+ spec[i].entry_index,
+ spec[i].flags, spec[i].new_size);
+ }
+
+ for ( j = 0; j < spec[i].num_pins; j++ )
+ {
+ create_pinned_entry_dependency2(cache_ptr,
+ spec[i].entry_type,
+ spec[i].entry_index,
+ spec[i].pin_type[j],
+ spec[i].pin_idx[j]);
+ }
+
+ for ( j = 0; j < spec[i].num_flush_ops; j++ )
+ {
+ add_flush_op2(spec[i].entry_type,
+ spec[i].entry_index,
+ spec[i].flush_ops[j].op_code,
+ spec[i].flush_ops[j].type,
+ spec[i].flush_ops[j].idx,
+ spec[i].flush_ops[j].flag,
+ spec[i].flush_ops[j].size);
+ }
+
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != init_expected_index_len ) ||
+ ( cache_ptr->index_size != init_expected_index_size ) ) {
+
+ pass2 = FALSE;
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size before flush in flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ flush_flags);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "flush with flags 0x%x failed in flush op test #%d.",
+ flush_flags, test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared != spec[i].expected_cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#else
+ /* When in procduction mode, the clear_dirty_bits() callback is
+ * not called, so entry_ptr->cleared should never be set.
+ */
+ if ( ( entry_ptr->deserialized != spec[i].expected_deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized != spec[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) {
+#endif /* NDEBUG */
+
+#if 0 /* This is useful debugging code. Lets keep it around. */
+
+ HDfprintf(stdout,
+ "desrlzd = %d(%d), clrd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n",
+ (int)(entry_ptr->deserialized),
+ (int)(spec[i].expected_deserialized),
+ (int)(entry_ptr->cleared),
+ (int)(spec[i].expected_cleared),
+ (int)(entry_ptr->serialized),
+ (int)(spec[i].expected_serialized),
+ (int)(entry_ptr->destroyed),
+ (int)(spec[i].expected_destroyed));
+
+ HDfprintf(stdout, "entry_ptr->header.is_dirty = %d\n",
+ (int)(entry_ptr->header.is_dirty));
+#endif
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Bad status on entry %d after flush op test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ if ( pass2 ) {
+
+ i = 0;
+ while ( ( pass2 ) && ( i < check_size ) )
+ {
+ if ( check[i].in_cache != entry_in_cache2(cache_ptr,
+ check[i].entry_type,
+ check[i].entry_index) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Check1 failed on entry %d after flush op test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+
+ base_addr = entries2[check[i].entry_type];
+ entry_ptr = &(base_addr[check[i].entry_index]);
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry status
+ * and histry when debug is enabled.
+ */
+ if ( ( entry_ptr->size != check[i].expected_size ) ||
+ ( ( ! entry_ptr->header.destroy_in_progress ) &&
+ ( check[i].in_cache ) &&
+ ( entry_ptr->header.size != check[i].expected_size ) ) ||
+ ( entry_ptr->at_main_addr != check[i].at_main_addr ) ||
+ ( entry_ptr->is_dirty != check[i].is_dirty ) ||
+ ( entry_ptr->header.is_dirty != check[i].is_dirty ) ||
+ ( entry_ptr->is_protected != check[i].is_protected ) ||
+ ( entry_ptr->header.is_protected != check[i].is_protected ) ||
+ ( entry_ptr->is_pinned != check[i].is_pinned ) ||
+ ( entry_ptr->header.is_pinned != check[i].is_pinned ) ||
+ ( entry_ptr->deserialized !=
+ check[i].expected_deserialized ) ||
+ ( entry_ptr->cleared != check[i].expected_cleared ) ||
+ ( entry_ptr->serialized != check[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != check[i].expected_destroyed ) ) {
+#else
+ /* When in procduction mode, the clear_dirty_bits() callback is
+ * not called, so entry_ptr->cleared should never be set.
+ */
+ if ( ( entry_ptr->size != check[i].expected_size ) ||
+ ( ( ! entry_ptr->header.destroy_in_progress ) &&
+ ( check[i].in_cache ) &&
+ ( entry_ptr->header.size != check[i].expected_size ) ) ||
+ ( entry_ptr->at_main_addr != check[i].at_main_addr ) ||
+ ( entry_ptr->is_dirty != check[i].is_dirty ) ||
+ ( entry_ptr->header.is_dirty != check[i].is_dirty ) ||
+ ( entry_ptr->is_protected != check[i].is_protected ) ||
+ ( entry_ptr->header.is_protected != check[i].is_protected ) ||
+ ( entry_ptr->is_pinned != check[i].is_pinned ) ||
+ ( entry_ptr->header.is_pinned != check[i].is_pinned ) ||
+ ( entry_ptr->deserialized !=
+ check[i].expected_deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized != check[i].expected_serialized ) ||
+ ( entry_ptr->destroyed != check[i].expected_destroyed ) ) {
+#endif /* NDEBUG */
+
+
+#if 0 /* This is useful debugging code. Lets keep it around for a while. */
+
+ if ( entry_ptr->size != check[i].expected_size ) {
+ HDfprintf(stdout, "entry_ptr->size (expected) = %d (%d).\n",
+ (int)(entry_ptr->size),
+ (int)(check[i].expected_size));
+ }
+ if ( ( ! entry_ptr->header.destroy_in_progress ) &&
+ ( check[i].in_cache ) &&
+ ( entry_ptr->header.size != check[i].expected_size ) ) {
+ HDfprintf(stdout,
+ "(!destroy in progress and in cache and size (expected) = %d (%d).\n",
+ (int)(entry_ptr->header.size),
+ (int)(check[i].expected_size));
+ }
+ if ( entry_ptr->at_main_addr != check[i].at_main_addr ) {
+ HDfprintf(stdout, "(%d,%d) at main addr (expected) = %d (%d).\n",
+ (int)(check[i].entry_type),
+ (int)(check[i].entry_index),
+ (int)(entry_ptr->at_main_addr),
+ (int)(check[i].at_main_addr));
+ }
+ if ( entry_ptr->is_dirty != check[i].is_dirty ) {
+ HDfprintf(stdout, "entry_ptr->is_dirty (expected) = %d (%d).\n",
+ (int)(entry_ptr->is_dirty),
+ (int)(check[i].is_dirty));
+ }
+ if ( entry_ptr->header.is_dirty != check[i].is_dirty ) {
+ HDfprintf(stdout, "entry_ptr->header.is_dirty (expected) = %d (%d).\n",
+ (int)(entry_ptr->header.is_dirty),
+ (int)(check[i].is_dirty));
+ }
+ if ( entry_ptr->is_protected != check[i].is_protected ) {
+ HDfprintf(stdout, "entry_ptr->is_protected (expected) = %d (%d).\n",
+ (int)(entry_ptr->is_protected),
+ (int)(check[i].is_protected));
+ }
+ if ( entry_ptr->header.is_protected != check[i].is_protected ) {
+ HDfprintf(stdout, "entry_ptr->header.is_protected (expected) = %d (%d).\n",
+ (int)(entry_ptr->is_protected),
+ (int)(check[i].is_protected));
+ }
+ if ( entry_ptr->is_pinned != check[i].is_pinned ) {
+ HDfprintf(stdout, "entry_ptr->is_pinned (expected) = %d (%d).\n",
+ (int)(entry_ptr->is_pinned),
+ (int)(check[i].is_pinned));
+ }
+ if ( entry_ptr->header.is_pinned != check[i].is_pinned ) {
+ HDfprintf(stdout, "entry_ptr->header.is_pinned (expected) = %d (%d).\n",
+ (int)(entry_ptr->header.is_pinned),
+ (int)(check[i].is_pinned));
+ }
+ if ( entry_ptr->deserialized !=
+ check[i].expected_deserialized ) {
+ HDfprintf(stdout,
+ "entry_ptr->deserialized (expected) = %d (%d).\n",
+ (int)(entry_ptr->deserialized),
+ (int)(check[i].expected_deserialized));
+ }
+ if ( entry_ptr->cleared != check[i].expected_cleared ) {
+ HDfprintf(stdout, "entry_ptr->cleared (expected) = %d (%d).\n",
+ (int)(entry_ptr->cleared),
+ (int)(check[i].expected_cleared));
+ }
+ if ( entry_ptr->serialized != check[i].expected_serialized ) {
+ HDfprintf(stdout,
+ "entry_ptr->serialized (expected) = %d (%d).\n",
+ (int)(entry_ptr->serialized),
+ (int)(check[i].expected_serialized));
+ }
+ if ( entry_ptr->destroyed != check[i].expected_destroyed ) {
+ HDfprintf(stdout, "entry_ptr->destroyed (expected) = %d (%d).\n",
+ (int)(entry_ptr->destroyed),
+ (int)(check[i].expected_destroyed));
+ }
+#endif
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Check2 failed on entry %d after flush op test #%d.",
+ i, test_num);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) == 0 )
+ &&
+ ( ( cache_ptr->index_len != expected_index_len )
+ ||
+ ( cache_ptr->index_size != expected_index_size )
+ )
+ )
+ ||
+ ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 )
+ &&
+ ( ( cache_ptr->index_len != 0 )
+ ||
+ ( cache_ptr->index_size != 0 )
+ )
+ )
+ ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after flush in flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ }
+
+ /* clean up the cache to prep for the next test */
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Flush failed on cleanup in flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "Unexpected cache len/size after cleanup in flush op test #%d.",
+ test_num);
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < spec_size ) )
+ {
+ base_addr = entries2[spec[i].entry_type];
+ entry_ptr = &(base_addr[spec[i].entry_index]);
+
+ entry_ptr->size = entry_sizes2[spec[i].entry_type];
+
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->cleared = FALSE;
+ entry_ptr->serialized = FALSE;
+ entry_ptr->destroyed = FALSE;
+
+ i++;
+ }
+
+ i = 0;
+ while ( ( pass2 ) && ( i < check_size ) )
+ {
+ base_addr = entries2[check[i].entry_type];
+ entry_ptr = &(base_addr[check[i].entry_index]);
+
+ entry_ptr->size = entry_sizes2[check[i].entry_type];
+
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->cleared = FALSE;
+ entry_ptr->serialized = FALSE;
+ entry_ptr->destroyed = FALSE;
+
+ i++;
+ }
+
+ return;
+
+} /* check_flush_cache__flush_op_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__flush_op_eviction_test()
+ *
+ * Purpose: Verify that flush operations work as expected when an
+ * entry is evicted.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/3/06
+ *
+ * Modifications:
+ *
+ * JRM -- 10/9/07
+ * Modified test to accommodate changes in the cache API.
+ * In particular, since the cache is now reading and writing
+ * its own entries, the serialize callback is not called on
+ * entry eviction unless the entry is dirty.
+ *
+ * This fact broke some basic assumptions of the test, and
+ * necessitated a substantial re-write.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__flush_op_eviction_test(H5C2_t * cache_ptr)
+{
+ const char * fcn_name = "check_flush_cache__flush_op_eviction_test";
+ int i;
+ int num_variable_entries = 10;
+ int num_monster_entries = 31;
+ int num_large_entries = 0;
+ herr_t result;
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+ struct expected_entry_status expected[10 + 31 + 14] =
+ {
+ /* the expected array is used to maintain a table of the expected status of every
+ * entry used in this test. Note that since the function that processes this
+ * array only processes as much of it as it is told to, we don't have to
+ * worry about maintaining the status of entries that we haven't used yet.
+ */
+ /* entry entry in at main */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: clrd: srlzd: dest: */
+ { VARIABLE_ENTRY_TYPE, 0, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 1, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 2, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 3, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 4, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 5, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 6, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 7, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 8, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { VARIABLE_ENTRY_TYPE, 9, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 0, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 1, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 2, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 3, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 4, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 5, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 6, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 7, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 8, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 9, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 10, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 11, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 12, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE },
+ { LARGE_ENTRY_TYPE, 13, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE }
+ };
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache_ptr NULL on entry to flush ops test.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache not empty at start of flush ops eviction test.";
+ }
+ else if ( ( cache_ptr->max_cache_size != (2 * 1024 * 1024 ) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024 ) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected cache config at start of flush op eviction test.";
+
+ } else {
+
+ /* set min clean size to zero for this test as it simplifies
+ * computing the expected cache size after each operation.
+ */
+
+ cache_ptr->min_clean_size = 0;
+ }
+
+ if ( pass2 ) {
+
+ /* the basic idea in this test is to insert a bunch of entries
+ * with flush operations associated with them, and then load
+ * other entries into the cache until the cache is full. At
+ * that point, load yet more entries into the cache, and see
+ * if the flush operations are performed as expected.
+ *
+ * To make things a bit more interesting, we also include a
+ * couple of pins.
+ */
+
+ /* reset the stats before we start. If stats are enabled, we will
+ * check to see if they are as expected at the end.
+ */
+ H5C2_stats__reset(cache_ptr);
+
+
+ /* load a few entries with pin relationships and flush ops.
+ * Start by just loading the entries.
+ */
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 0);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 0,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 4));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 1);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 1,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 4));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 2);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 2,
+ H5C2__NO_FLAGS_SET, (size_t)0);
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 3);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 3,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 4));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 4);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 4,
+ H5C2__NO_FLAGS_SET, (size_t)0);
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 5);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 5,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 4));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 6);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 6,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 2));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 7);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 7,
+ H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG,
+ (VARIABLE_ENTRY_SIZE / 2));
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 8);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 8,
+ H5C2__NO_FLAGS_SET, (size_t)0);
+
+ protect_entry2(cache_ptr, VARIABLE_ENTRY_TYPE, 9);
+ unprotect_entry_with_size_change2(cache_ptr, VARIABLE_ENTRY_TYPE, 9,
+ H5C2__NO_FLAGS_SET, (size_t)0);
+
+ if ( ( cache_ptr->index_len != 10 ) ||
+ ( cache_ptr->index_size != (4 * (VARIABLE_ENTRY_SIZE / 4)) +
+ (2 * (VARIABLE_ENTRY_SIZE / 2)) +
+ (4 * VARIABLE_ENTRY_SIZE) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 1.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ /* Now set up the pinning relationships:
+ *
+ * Briefly, (VET, 0) is pinned by (VET, 1), (VET, 3), and (VET, 5)
+ * (VET, 9) is pinned by (VET, 5), and (VET, 7)
+ */
+ create_pinned_entry_dependency2(cache_ptr, VARIABLE_ENTRY_TYPE, 1,
+ VARIABLE_ENTRY_TYPE, 0);
+ create_pinned_entry_dependency2(cache_ptr, VARIABLE_ENTRY_TYPE, 3,
+ VARIABLE_ENTRY_TYPE, 0);
+ create_pinned_entry_dependency2(cache_ptr, VARIABLE_ENTRY_TYPE, 5,
+ VARIABLE_ENTRY_TYPE, 0);
+ create_pinned_entry_dependency2(cache_ptr, VARIABLE_ENTRY_TYPE, 5,
+ VARIABLE_ENTRY_TYPE, 9);
+ create_pinned_entry_dependency2(cache_ptr, VARIABLE_ENTRY_TYPE, 7,
+ VARIABLE_ENTRY_TYPE, 9);
+
+ /* Next, set up the flush operations:
+ *
+ * Briefly, (VET, 1) dirties (VET, 0)
+ * resizes (VET, 0) to 3/4 VARIABLE_ENTRY_SIZE
+ *
+ * (VET, 3) dirties (VET, 0)
+ * resizes (VET, 0) to VARIABLE_ENTRY_SIZE
+ * renames (VET, 0) to its alternate address
+ *
+ * (VET, 5) dirties (VET, 0)
+ * resizes itself to VARIABLE_ENTRY_SIZE / 2
+ *
+ * (VET, 7) dirties (VET, 9)
+ *
+ * (VET, 9) dirties (VET, 8)
+ */
+
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 1, FLUSH_OP__DIRTY,
+ VARIABLE_ENTRY_TYPE, 0, FALSE, (size_t)0);
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 1, FLUSH_OP__RESIZE,
+ VARIABLE_ENTRY_TYPE, 0, FALSE,
+ 3 * VARIABLE_ENTRY_SIZE / 4);
+
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 3, FLUSH_OP__DIRTY,
+ VARIABLE_ENTRY_TYPE, 0, FALSE, (size_t)0);
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 3, FLUSH_OP__RESIZE,
+ VARIABLE_ENTRY_TYPE, 0, FALSE, VARIABLE_ENTRY_SIZE);
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 3, FLUSH_OP__RENAME,
+ VARIABLE_ENTRY_TYPE, 0, FALSE, (size_t)0);
+
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 5, FLUSH_OP__DIRTY,
+ VARIABLE_ENTRY_TYPE, 0, FALSE, (size_t)0);
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 5, FLUSH_OP__RESIZE,
+ VARIABLE_ENTRY_TYPE, 5, FALSE, VARIABLE_ENTRY_SIZE / 2);
+
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 7, FLUSH_OP__DIRTY,
+ VARIABLE_ENTRY_TYPE, 9, FALSE, (size_t)0);
+
+ add_flush_op2(VARIABLE_ENTRY_TYPE, 9, FLUSH_OP__DIRTY,
+ VARIABLE_ENTRY_TYPE, 8, FALSE, (size_t)0);
+ }
+
+ if ( pass2 ) {
+
+ /* to summarize, at present the following variable size entries
+ * are in cache with the following characteristics:
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 2.5 KB Y Y - -
+ *
+ * (VET, 1) Y 2.5 KB Y N 0 dirty (VET, 0),
+ * resize (VET, 0) to 7.5 KB
+ *
+ * (VET, 2) Y 10 KB N N - -
+ *
+ *
+ * (VET, 3) Y 2.5 KB N N 0 dirty (VET, 0)
+ * resize (VET, 0) to 10 KB
+ * rename (VET, 0) to its alternate address
+ *
+ * (VET, 4) Y 10 KB N N - -
+ *
+ *
+ * (VET, 5) Y 2.5 KB Y N 0, 9 dirty (VET, 0)
+ * resize (VET, 5) to 5 KB
+ *
+ * (VET, 6) Y 5 KB Y N - -
+ *
+ * (VET, 7) Y 5 KB Y N 9 dirty (VET, 9)
+ *
+ * (VET, 8) Y 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N N - dirty (VET, 8)
+ *
+ * Recall that in this test bed, flush operations are executed the
+ * first time the associated entry is flushed, and are then
+ * deleted.
+ */
+
+ /* Now fill up the cache with other, unrelated entries */
+ for ( i = 0; i < 31; i++ )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ for ( i = 0; i < 1; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* The cache should now be exactly full */
+ if ( ( cache_ptr->index_len != 42 ) ||
+ ( cache_ptr->index_size != 2 * 1024 * 1024 ) ||
+ ( cache_ptr->index_size != ((4 * VARIABLE_ENTRY_SIZE / 4) +
+ (2 * VARIABLE_ENTRY_SIZE / 2) +
+ (4 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (1 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 2.";
+
+ } else {
+
+ /* verify the expected status of all entries we have loaded to date: */
+ num_large_entries = 1;
+ verify_entry_status2(cache_ptr,
+ 0,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+ }
+
+
+ if ( pass2 ) {
+
+ /* Now load a large entry. This should result in the eviction
+ * of (VET,2), and the increase in the size of (VET, 0) from .25
+ * VARIABLE_ENTRY_SIZE to .75 VARIABLE_ENTRY_SIZE.
+ *
+ * The following table illustrates the intended state of affairs
+ * after the eviction:
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 7.5 KB Y Y - -
+ *
+ * (VET, 1) Y 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) Y 2.5 KB Y N 0 dirty (VET, 0)
+ * resize (VET, 0) to 10 KB
+ * rename (VET, 0) to its alternate address
+ *
+ * (VET, 4) Y 10 KB N N - -
+ *
+ * (VET, 5) Y 2.5 KB Y N 0, 9 dirty (VET, 0)
+ * resize (VET, 5) to 5 KB
+ *
+ * (VET, 6) Y 5 KB Y N - -
+ *
+ * (VET, 7) Y 5 KB Y N 9 dirty (VET, 9)
+ *
+ * (VET, 8) Y 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N Y - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+ expected[0].size = 3 * VARIABLE_ENTRY_SIZE / 4;
+ expected[1].is_dirty = FALSE;
+ expected[1].serialized = TRUE;
+ expected[2].in_cache = FALSE;
+ expected[2].destroyed = TRUE;
+
+ num_large_entries = 2;
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 1);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, 1,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+
+ if ( ( cache_ptr->index_len != 42 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (VARIABLE_ENTRY_SIZE) +
+ (VARIABLE_ENTRY_SIZE / 2) +
+ (LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((1 * (3 * VARIABLE_ENTRY_SIZE / 4)) +
+ (3 * VARIABLE_ENTRY_SIZE / 4) +
+ (2 * VARIABLE_ENTRY_SIZE / 2) +
+ (3 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (2 * LARGE_ENTRY_SIZE)) ) ) {
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 3.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 1,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Now load another large entry. This should result in the eviction
+ * of (VET, 4), the increase in the size of (VET, 0) from .75
+ * VARIABLE_ENTRY_SIZE to 1.0 VARIABLE_ENTRY_SIZE, and the renaming
+ * of (VET, 0) to its alternate address.
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 10 KB Y Y - -
+ *
+ * (VET, 1) Y 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) Y 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) Y 2.5 KB Y N 0, 9 dirty (VET, 0)
+ * resize (VET, 5) to 5 KB
+ *
+ * (VET, 6) Y 5 KB Y N - -
+ *
+ * (VET, 7) Y 5 KB Y N 9 dirty (VET, 9)
+ *
+ * (VET, 8) Y 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N Y - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+ expected[0].size = VARIABLE_ENTRY_SIZE;
+ expected[0].at_main_addr = FALSE;
+ expected[3].is_dirty = FALSE;
+ expected[3].serialized = TRUE;
+ expected[4].in_cache = FALSE;
+ expected[4].destroyed = TRUE;
+
+ num_large_entries = 3;
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 2);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, 2,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+
+ if ( ( cache_ptr->index_len != 42 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (2 * VARIABLE_ENTRY_SIZE) +
+ (3 * VARIABLE_ENTRY_SIZE / 4) +
+ (2 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((3 * VARIABLE_ENTRY_SIZE / 4) +
+ (2 * VARIABLE_ENTRY_SIZE / 2) +
+ (3 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (3 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 4.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 2,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* load two more large entries. This should result in (VET, 5) being
+ * flushed, and increasing its size from 1/4 VARIABLE_ENTRY_SIZE to
+ * VARIABLE_ENTRY_SIZE.
+ *
+ * As a result of this size increase, the cache will have to look
+ * for another entry to evict. After flushing (VET, 6) and (VET, 7),
+ * it should evict (VET, 8), yielding the needed memory and dirtying
+ * (VET, 9).
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 10 KB Y Y - -
+ *
+ * (VET, 1) Y 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) Y 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) Y 5 KB N N 0, 9 -
+ *
+ * (VET, 6) Y 5 KB N N - -
+ *
+ * (VET, 7) Y 5 KB N N 9 -
+ *
+ * (VET, 8) N 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N Y - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+
+ expected[5].size = VARIABLE_ENTRY_SIZE / 2;
+ expected[5].is_dirty = FALSE;
+ expected[5].serialized = TRUE;
+ expected[6].is_dirty = FALSE;
+ expected[6].serialized = TRUE;
+ expected[7].is_dirty = FALSE;
+ expected[7].serialized = TRUE;
+ expected[8].in_cache = FALSE;
+ expected[8].destroyed = TRUE;
+ expected[9].is_dirty = TRUE;
+
+ num_large_entries = 5;
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, 3,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 4);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, 4,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (3 * VARIABLE_ENTRY_SIZE) +
+ (1 * VARIABLE_ENTRY_SIZE / 4) +
+ (3 * VARIABLE_ENTRY_SIZE / 4) +
+ (4 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE / 4) +
+ (3 * VARIABLE_ENTRY_SIZE / 2) +
+ (2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (5 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 5.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 3,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* now touch all the non VARIABLE_ENTRY_TYPE entries in the
+ * cache to bring all the VARIABLE_ENTRY_TYPE entries to the
+ * end of the LRU list.
+ *
+ * Note that we don't have to worry about (VET, 0) and (VET, 9)
+ * as they are pinned and thus not in the LRU list to begin with.
+ */
+ for ( i = 0; i < 31; i++ )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ for ( i = 0; i < 5; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (3 * VARIABLE_ENTRY_SIZE) +
+ (1 * VARIABLE_ENTRY_SIZE / 4) +
+ (3 * VARIABLE_ENTRY_SIZE / 4) +
+ (4 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE / 4) +
+ (3 * VARIABLE_ENTRY_SIZE / 2) +
+ (2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (5 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 6.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 4,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Now load three more large entries. This should result
+ * in the evictions of (VET, 1), (VET, 3), and (VET, 5), and the
+ * unpinning of (VET, 0)
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 10 KB Y N - -
+ *
+ * (VET, 1) N 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) N 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) N 5 KB N N - -
+ *
+ * (VET, 6) Y 5 KB N N - -
+ *
+ * (VET, 7) Y 5 KB N N 9 -
+ *
+ * (VET, 8) N 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N Y - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+
+ expected[0].is_pinned = FALSE;
+ expected[1].in_cache = FALSE;
+ expected[1].destroyed = TRUE;
+ expected[3].in_cache = FALSE;
+ expected[3].destroyed = TRUE;
+ expected[5].in_cache = FALSE;
+ expected[5].destroyed = TRUE;
+
+ num_large_entries = 8;
+
+ for ( i = 5; i < 8; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (4 * VARIABLE_ENTRY_SIZE) +
+ (1 * VARIABLE_ENTRY_SIZE / 4) +
+ (3 * VARIABLE_ENTRY_SIZE / 4) +
+ (7 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE / 2) +
+ (2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (8 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 7.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 5,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* load another large entry. (VET, 6) should be evicted.
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 10 KB Y N - -
+ *
+ * (VET, 1) N 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) N 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) N 5 KB N N - -
+ *
+ * (VET, 6) N 5 KB N N - -
+ *
+ * (VET, 7) Y 5 KB N N 9 -
+ *
+ * (VET, 8) N 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB N Y - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+
+ expected[6].in_cache = FALSE;
+ expected[6].destroyed = TRUE;
+
+ num_large_entries = 9;
+
+ for ( i = 8; i < 9; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (3 * VARIABLE_ENTRY_SIZE) -
+ (VARIABLE_ENTRY_SIZE / 2) +
+ (8 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((1 * VARIABLE_ENTRY_SIZE / 2) +
+ (2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (9 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 8.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 6,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Load another large entry.
+ *
+ * (VET, 7) should be evicted, and (VET, 9) should be unpinned.
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) Y 10 KB Y N - -
+ *
+ * (VET, 1) N 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) N 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) N 5 KB N N - -
+ *
+ * (VET, 6) N 5 KB N N - -
+ *
+ * (VET, 7) N 5 KB N N - -
+ *
+ * (VET, 8) N 10 KB N N - -
+ *
+ * (VET, 9) Y 10 KB Y N - dirty (VET, 8)
+ *
+ * Start by updating the expected table for the expected changes in entry status:
+ */
+
+ expected[7].in_cache = FALSE;
+ expected[7].destroyed = TRUE;
+ expected[9].is_pinned = FALSE;
+
+ num_large_entries = 10;
+
+ for ( i = 9; i < 10; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (4 * VARIABLE_ENTRY_SIZE) +
+ (9 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (10 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 9.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 7,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Again, touch all the non VARIABLE_ENTRY_TYPE entries in the
+ * cache to bring all the VARIABLE_ENTRY_TYPE entries to the
+ * end of the LRU list.
+ *
+ * Both (VET, 0) and (VET, 7) have been unpinned, so they are
+ * now in the LRU list.
+ */
+ for ( i = 0; i < 31; i++ )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ for ( i = 0; i < 10; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 43 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (4 * VARIABLE_ENTRY_SIZE) +
+ (9 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (10 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 10.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 8,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* load two more large entries. Things get a bit complicated here,
+ * so I'll go through the operation step by step.
+ *
+ * Initially, the cache has 4 KB of empty space, so the first entry
+ * (LET, 10) is loaded via calls to H5C2_protect() H5C2_unprotect()
+ * without causing any evictions.
+ *
+ * However, this is not the case for the call of H5C2_protect() on
+ * (LET, 11).
+ *
+ * Before inserting (LET, 11), H5C2_protect(LET, 11) must try to
+ * free up at least 4 KB of space. To do this, it starts scanning
+ * up the LRU list to find entries to evict.
+ *
+ * (VET, 0) is at the bottom of the LRU list, and thus is the first
+ * entry considered. However, it is dirty, so it is moved to the
+ * top of the LRU list, flushed to disk, and marked clean.
+ *
+ * (VET, 9) is the next entry on the bottom of the LRU list. It is
+ * dirty too, so the cache moves it to the top of the LRU list,
+ * and calls its serialize callback function to construct an on
+ * disk image of the entry.
+ *
+ * However, this serialize function needs to modify (VET, 8), which
+ * is currently not in cache. Thus it calls H5C2_protect(VET, 8)
+ * to gain access to it. H5C2_protect(VET, 8) loads (VET, 8), and
+ * then attempts to evict entries to make space for it. (VET, 9)
+ * has already been moved to the head of the LRU list, so the next
+ * entries on the LRU are (MET, 0) thru (MET, 30) and (LET, 0) thru
+ * (LET, 10) -- all of which are dirty, and are therefore flushed
+ * and moved to the head of the LRU list.
+ *
+ * The next entry on the bottom of the LRU list is (VET, 0), which
+ * is clean, and is therefore evicted to make space for (VET, 8).
+ * This space is sufficient, so H5C2_protect(VET, 8) inserts
+ * (VET, 8) into the cache's index, marks it as protected, and
+ * returns to the serialize function for (VET, 9).
+ *
+ * When the serialize function for (VET, 9) is done with (VET, 8), it
+ * calls H5C2_unprotect(VET, 8), which marks (VET, 8) as dirty and
+ * unprotected, and places it at the head of the LRU.
+ *
+ * The serialize function for (VET, 9) then returns, and (VET, 9) is
+ * is written to disk, and marked clean.
+ *
+ * At this point, the cache is still full (since (VET, 8) took the
+ * space created by the eviction of (VET, 0)). Thus
+ * H5C2_protect(LET, 11) continues to look for space. While
+ * (MET, 0) was the next item on the LRU list when it called the
+ * serialize function for (VET, 9), the function notices that the
+ * LRU has been modified, and restarts its search for candidates
+ * for eviction at the bottom of the LRU.
+ *
+ * (VET, 0) is now at the bottom of the LRU, and is clean. Thus
+ * it is evicted. This makes sufficient space for (LET, 11), so
+ * H5C2_protect(LET, 11) inserts it into the cache, marks it as
+ * protected, and returns.
+ *
+ * H5C2_unprotect(LET, 11) marks (LET, 11) as unprotected, and then
+ * returns as well.
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) N 10 KB N N - -
+ *
+ * (VET, 1) N 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) N 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) N 5 KB N N - -
+ *
+ * (VET, 6) N 5 KB N N - -
+ *
+ * (VET, 7) N 5 KB N N - -
+ *
+ * (VET, 8) Y 10 KB Y N - -
+ *
+ * (VET, 9) N 10 KB N N - -
+ *
+ * Start by updating the expected table for the expected changes in
+ * entry status:
+ *
+ * Note that we reset the deserialized, cleared, serialized, and
+ * destroyed fields of (VET,8) so we can track what is happening.
+ */
+ base_addr = entries2[VARIABLE_ENTRY_TYPE];
+ entry_ptr = &(base_addr[8]);
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->cleared = FALSE;
+ entry_ptr->deserialized = FALSE;
+ entry_ptr->destroyed = FALSE;
+
+ expected[0].in_cache = FALSE;
+ expected[0].is_dirty = FALSE;
+ expected[0].serialized = TRUE;
+ expected[0].destroyed = TRUE;
+ expected[8].in_cache = TRUE;
+ expected[8].is_dirty = TRUE;
+ expected[8].deserialized = TRUE;
+ expected[8].serialized = FALSE;
+ expected[8].destroyed = FALSE;
+ expected[9].in_cache = FALSE;
+ expected[9].is_dirty = FALSE;
+ expected[9].serialized = TRUE;
+ expected[9].destroyed = TRUE;
+
+ num_large_entries = 12;
+
+ /* a newly loaded entry is not inserted in the cache until after
+ * space has been made for it. Thus (LET, 11) will not be flushed.
+ */
+ for ( i = num_variable_entries;
+ i < num_variable_entries + num_monster_entries + num_large_entries - 1;
+ i++ )
+ {
+ expected[i].is_dirty = FALSE;
+ expected[i].serialized = TRUE;
+ }
+
+ for ( i = 10; i < 12; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 44 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (5 * VARIABLE_ENTRY_SIZE) +
+ (11 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((1 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (12 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 11.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 9,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Again, touch all the non VARIABLE_ENTRY_TYPE entries in the
+ * cache to bring the last remaining VARIABLE_ENTRY_TYPE entry to the
+ * end of the LRU list.
+ */
+ for ( i = 0; i < num_monster_entries; i++ )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ for ( i = 0; i < num_large_entries; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* update the expected array to mark all these entries dirty again. */
+ for ( i = num_variable_entries;
+ i < num_variable_entries + num_monster_entries + num_large_entries - 1;
+ i++ )
+ {
+ expected[i].is_dirty = TRUE;
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 44 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (5 * VARIABLE_ENTRY_SIZE) +
+ (11 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((1 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
+ (12 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 12.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 10,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ if ( pass2 ) {
+
+ /* Load two more large entries.
+ *
+ * Since (VET, 8) is dirty, at first this will just cause (VET, 8)
+ * to be flushed.
+ *
+ * But all other entries in the cache are dirty, so the cache will
+ * flush them all, and then evict (VET, 8) on the second pass.
+ *
+ * The following table shows the expected states of the variable
+ * size entries after the test.
+ *
+ * in
+ * entry: cache? size: dirty? pinned? pins: flush operations:
+ *
+ * (VET, 0) N 10 KB N N - -
+ *
+ * (VET, 1) N 2.5 KB N N - -
+ *
+ * (VET, 2) N 10 KB N N - -
+ *
+ * (VET, 3) N 2.5 KB N N - -
+ *
+ * (VET, 4) N 10 KB N N - -
+ *
+ * (VET, 5) N 5 KB N N - -
+ *
+ * (VET, 6) N 5 KB N N - -
+ *
+ * (VET, 7) N 5 KB N N - -
+ *
+ * (VET, 8) N 10 KB N N - -
+ *
+ * (VET, 9) N 10 KB N N - -
+ *
+ * Start by updating the expected table for the expected changes in
+ * entry status:
+ */
+
+ expected[8].in_cache = FALSE;
+ expected[8].is_dirty = FALSE;
+ expected[8].serialized = TRUE;
+ expected[8].destroyed = TRUE;
+
+ num_large_entries = 14;
+
+ /* a newly loaded entry is not inserted in the cache until after
+ * space has been made for it. Thus (LET, 13) will not be flushed.
+ */
+ for ( i = num_variable_entries;
+ i < num_variable_entries + num_monster_entries + num_large_entries - 1;
+ i++ )
+ {
+ expected[i].is_dirty = FALSE;
+ expected[i].serialized = TRUE;
+ }
+
+ for ( i = 12; i < 14; i++ )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+ unprotect_entry_with_size_change2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ H5C2__DIRTIED_FLAG, (size_t)0);
+ }
+
+ /* verify cache size */
+ if ( ( cache_ptr->index_len != 45 ) ||
+ ( cache_ptr->index_size != (2 * 1024 * 1024) -
+ (6 * VARIABLE_ENTRY_SIZE) +
+ (13 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->index_size != ((31 * MONSTER_ENTRY_SIZE) +
+ (14 * LARGE_ENTRY_SIZE)) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "unexpected size/len in flush op eviction test 13.";
+ }
+
+ /* verify entry status */
+ verify_entry_status2(cache_ptr,
+ 11,
+ (num_variable_entries + num_monster_entries + num_large_entries),
+ expected);
+ }
+
+ /* at this point we have cycled all the variable size entries through
+ * the cache.
+ *
+ * flush the cache and end the test.
+ */
+
+ if ( pass2 ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Cache flush invalidate failed after flush op eviction test";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache len/size after cleanup of flush op eviction test";
+
+ }
+ }
+
+#if H5C2_COLLECT_CACHE_STATS
+ /* If we are collecting stats, check to see if we get the expected
+ * values.
+ *
+ * Testing the stats code is fairly new, but given the extent
+ * to which I find myself depending on the stats, I've decided
+ * to start testing the stats whenever it is convenient to do
+ * so.
+ */
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->insertions[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_insertions[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->clears[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 19 ) ||
+ ( cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 11 ) ||
+ ( cache_ptr->renames[VARIABLE_ENTRY_TYPE] != 1 ) ||
+ ( cache_ptr->entry_flush_renames[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->cache_flush_renames[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pins[VARIABLE_ENTRY_TYPE] != 2 ) ||
+ ( cache_ptr->unpins[VARIABLE_ENTRY_TYPE] != 2 ) ||
+ ( cache_ptr->dirty_pins[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_flushes[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_clears[VARIABLE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->size_increases[VARIABLE_ENTRY_TYPE] != 3 ) ||
+ ( cache_ptr->size_decreases[VARIABLE_ENTRY_TYPE] != 6 ) ||
+ ( cache_ptr->entry_flush_size_changes[VARIABLE_ENTRY_TYPE] != 1 ) ||
+ ( cache_ptr->cache_flush_size_changes[VARIABLE_ENTRY_TYPE] != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected variable size entry stats.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->insertions[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_insertions[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->clears[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->flushes[LARGE_ENTRY_TYPE] != 38 ) ||
+ ( cache_ptr->evictions[LARGE_ENTRY_TYPE] != 14 ) ||
+ ( cache_ptr->renames[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->entry_flush_renames[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->cache_flush_renames[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pins[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->unpins[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->dirty_pins[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_flushes[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_clears[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->size_increases[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->size_decreases[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->entry_flush_size_changes[LARGE_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->cache_flush_size_changes[LARGE_ENTRY_TYPE] != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected monster entry stats.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->insertions[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_insertions[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->clears[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->flushes[MONSTER_ENTRY_TYPE] != 93 ) ||
+ ( cache_ptr->evictions[MONSTER_ENTRY_TYPE] != 31 ) ||
+ ( cache_ptr->renames[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->entry_flush_renames[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->cache_flush_renames[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pins[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->unpins[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->dirty_pins[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_flushes[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->pinned_clears[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->size_increases[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->size_decreases[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->entry_flush_size_changes[MONSTER_ENTRY_TYPE] != 0 ) ||
+ ( cache_ptr->cache_flush_size_changes[MONSTER_ENTRY_TYPE] != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected monster entry stats.";
+ }
+ }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+ }
+
+ return;
+
+} /* check_flush_cache__flush_op_eviction_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__single_entry()
+ *
+ * Purpose: Verify that flush_cache behaves as expected when the cache
+ * contains only one element.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/12/05
+ *
+ * Modifications:
+ *
+ * JRM -- 3/29/06
+ * Added tests for pinned entries.
+ *
+ * JRM -- 5/17/06
+ * Complete rewrite of pinned entry tests to accommodate
+ * the new H5C2_mark_pinned_or_protected_entry_dirty()
+ * call.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__single_entry(H5C2_t * cache_ptr)
+{
+ /* const char * fcn_name = "check_flush_cache__single_entry"; */
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache_ptr NULL on entry to single entry case.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "cache not empty at beginning of single entry case.";
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 1,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 2,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 3,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 4,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 5,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 6,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 7,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 8,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 9,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 10,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 11,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 12,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 13,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 14,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 15,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 16,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 17,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 18,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 19,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 20,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 21,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 22,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 23,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 24,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 25,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 26,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 27,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 28,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 29,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 30,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 31,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 32,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ FALSE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ TRUE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 33,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 34,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 35,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 36,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 37,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 38,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 39,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 40,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 41,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 42,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 43,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 44,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 45,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 46,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 47,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 48,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__NO_FLAGS_SET,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 49,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 50,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__NO_FLAGS_SET,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 51,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 52,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 53,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 54,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 55,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 56,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 57,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 58,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 59,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 60,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ FALSE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 61,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 62,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C2__FLUSH_INVALIDATE_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ FALSE,
+ /* expected_serialized */ TRUE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 63,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ FALSE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+ if ( pass2 ) {
+
+ check_flush_cache__single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ 64,
+ /* entry_type */ PICO_ENTRY_TYPE,
+ /* entry_idx */ 0,
+ /* insert_flag */ TRUE,
+ /* dirty_flag */ TRUE,
+ /* flags */ H5C2__SET_FLUSH_MARKER_FLAG,
+ /* flush_flags */ H5C2__FLUSH_INVALIDATE_FLAG |
+ H5C2__FLUSH_CLEAR_ONLY_FLAG |
+ H5C2__FLUSH_MARKED_ENTRIES_FLAG,
+ /* expected_deserialized */ FALSE,
+ /* expected_cleared */ TRUE,
+ /* expected_serialized */ FALSE,
+ /* expected_destroyed */ TRUE
+ );
+ }
+
+
+ /* Now run single entry tests for pinned entries. Test all combinations
+ * of:
+ *
+ * 1) Unpin by unprotect vs. unpin by call to H5C2_unpin_entry().
+ *
+ * 2) Marked dirty by unprotect or not.
+ *
+ * 3) Marked dirty by call to H5C2_mark_pinned_entry_dirty() or not.
+ *
+ * 4) Marked dirty by call to H5C2_mark_pinned_or_protected_entry_dirty()
+ * while protected or not.
+ *
+ * 5) Marked dirty by call to H5C2_mark_pinned_or_protected_entry_dirty()
+ * while pinned or not.
+ *
+ * 6) Entry marked for flush or not.
+ *
+ * 7) Call flush with H5C2__FLUSH_MARKED_ENTRIES_FLAG or not.
+ *
+ * 8) Call flush with H5C2__FLUSH_CLEAR_ONLY_FLAG or not.
+ *
+ * This yields a total of 256 tests.
+ *
+ * The tests and their expected results are given in the spec table
+ * below. The values assigned to the expected_cleared, expected_serialized,
+ * and expected_destroyed fields are somewhat arcane, so the following
+ * overview may be useful.
+ *
+ * In addition to simply checking to see if the test case runs,
+ * we also check to see if the desired operations take place on the
+ * cache entry. Thus expected_cleared is set to TRUE if we expect
+ * the entry to be cleared, expected_serialized is set to TRUE if
+ * we expect the entry to be serialized, and expected_destroyed is set
+ * to TRUE if we expect the entry to be destroyed.
+ *
+ * In this test, we are working with pinned entries which can't be
+ * evicted, so expected_destroyed is always FALSE. We could pull it
+ * from the table, but it is a hold over from the code this test
+ * was adapted from, and it doesn't do any particular harm.
+ *
+ * In general, we expect an entry to be flushed if it is dirty, and
+ * flush is invoked WITHOUT the H5C2__FLUSH_CLEAR_ONLY_FLAG. However,
+ * there are exceptions: If flush is invoked with the
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG, only marked entries will be flushed.
+ *
+ * Further, unprotecting an entry with the H5C2__SET_FLUSH_MARKER_FLAG
+ * will NOT mark the entry unless the entry has been marked dirty
+ * either before or during the unprotect call. This results in
+ * some counterintuitive entries in the table. It may be useful to
+ * look in the test code to see the exact order of operations.
+ *
+ * Similarly, we expect an entry to be cleared if it is dirty, and
+ * flush is invoked WITH the H5C2__FLUSH_CLEAR_ONLY_FLAG. Again, there
+ * are exceptions -- If flush is also invoked with the
+ * H5C2__FLUSH_MARKED_ENTRIES_FLAG, only the marked entries will be
+ * cleared.
+ *
+ * The above comments about applying unprotect with the
+ * H5C2__SET_FLUSH_MARKER_FLAG apply here as well.
+ */
+
+ if ( pass2 ) {
+
+ int i;
+ struct pinned_single_entry_test_spec
+ {
+ int test_num;
+ int entry_type;
+ int entry_idx;
+ hbool_t dirty_flag;
+ hbool_t mark_dirty;
+ hbool_t pop_mark_dirty_prot;
+ hbool_t pop_mark_dirty_pinned;
+ hbool_t unprotect_unpin;
+ unsigned int flags;
+ unsigned int flush_flags;
+ hbool_t expected_cleared;
+ hbool_t expected_serialized;
+ hbool_t expected_destroyed;
+ } spec[256] =
+ /* pop pop
+ * ent mark mark
+ * test entry -ry dirty mark dirty dirty unprot flush expect expect expect
+ * num type idx flag dirty prot pinned unpin flags flags clear srlzd destroy
+ */
+ { { 1, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, FALSE, FALSE },
+ { 2, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, FALSE, FALSE },
+ { 3, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 4, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 5, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 6, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 7, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 8, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 9, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 10, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 11, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 12, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 13, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 14, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 15, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 16, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 17, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 18, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 19, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 20, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 21, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 22, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 23, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 24, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 25, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 26, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 27, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 28, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 29, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 30, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 31, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 32, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 33, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, FALSE, FALSE },
+ { 34, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, FALSE, FALSE },
+ { 35, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 36, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 37, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 38, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 39, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 40, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 41, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 42, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 43, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 44, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 45, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 46, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 47, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 48, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 49, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 50, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 51, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 52, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 53, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 54, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 55, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 56, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 57, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 58, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 59, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 60, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 61, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 62, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 63, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 64, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__NO_FLAGS_SET, FALSE, TRUE, FALSE },
+ { 65, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 66, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 67, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 68, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 69, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 70, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 71, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 72, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 73, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 74, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 75, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 76, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 77, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 78, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 79, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 80, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 81, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 82, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 83, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 84, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 85, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 86, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 87, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 88, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 89, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 90, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 91, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 92, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 93, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 94, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 95, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 96, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 97, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 98, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 99, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 100, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 101, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 102, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 103, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 104, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 105, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 106, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 107, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 108, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, FALSE, FALSE },
+ { 109, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 110, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 111, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 112, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 113, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 114, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 115, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 116, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 117, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 118, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 119, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 120, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 121, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 122, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 123, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 124, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 125, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 126, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 127, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 128, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG, FALSE, TRUE, FALSE },
+ { 129, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 130, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 131, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 132, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 133, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 134, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 135, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 136, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 137, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 138, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 139, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 140, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 141, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 142, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 143, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 144, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 145, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 146, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 147, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 148, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 149, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 150, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 151, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 152, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 153, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 154, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 155, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 156, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 157, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 158, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 159, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 160, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 161, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 162, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 163, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 164, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 165, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 166, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 167, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 168, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 169, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 170, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 171, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 172, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 173, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 174, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 175, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 176, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 177, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 178, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 179, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 180, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 181, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 182, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 183, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 184, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 185, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 186, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 187, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 188, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 189, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 190, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 191, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 192, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 193, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 194, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 195, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 196, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 197, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 198, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 199, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 200, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 201, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 202, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 203, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 204, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 205, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 206, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 207, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 208, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 209, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 210, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 211, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 212, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 213, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 214, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 215, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 216, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 217, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 218, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 219, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 220, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 221, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 222, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 223, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 224, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__NO_FLAGS_SET, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 225, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 226, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 227, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 228, PICO_ENTRY_TYPE, 0, FALSE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 229, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 230, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 231, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 232, PICO_ENTRY_TYPE, 0, FALSE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 233, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 234, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 235, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 236, PICO_ENTRY_TYPE, 0, FALSE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, FALSE, FALSE, FALSE },
+ { 237, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 238, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 239, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 240, PICO_ENTRY_TYPE, 0, FALSE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 241, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 242, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 243, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 244, PICO_ENTRY_TYPE, 0, TRUE, FALSE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 245, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 246, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 247, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 248, PICO_ENTRY_TYPE, 0, TRUE, FALSE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 249, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 250, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 251, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 252, PICO_ENTRY_TYPE, 0, TRUE, TRUE, FALSE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 253, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 254, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, FALSE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 255, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, FALSE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE },
+ { 256, PICO_ENTRY_TYPE, 0, TRUE, TRUE, TRUE, TRUE, TRUE, H5C2__SET_FLUSH_MARKER_FLAG, H5C2__FLUSH_MARKED_ENTRIES_FLAG | H5C2__FLUSH_CLEAR_ONLY_FLAG, TRUE, FALSE, FALSE } };
+
+ i = 0;
+ while ( ( pass2 ) && ( i < 256 ) )
+ {
+ check_flush_cache__pinned_single_entry_test
+ (
+ /* cache_ptr */ cache_ptr,
+ /* test_num */ spec[i].test_num,
+ /* entry_type */ spec[i].entry_type,
+ /* entry_idx */ spec[i].entry_idx,
+ /* dirty_flag */ spec[i].dirty_flag,
+ /* mark_dirty */ spec[i].mark_dirty,
+ /* pop_mark_dirty_prot */ spec[i].pop_mark_dirty_prot,
+ /* pop_mark_dirty_pinned */ spec[i].pop_mark_dirty_pinned,
+ /* unprotect_unpin */ spec[i].unprotect_unpin,
+ /* flags */ spec[i].flags,
+ /* flush_flags */ spec[i].flush_flags,
+ /* expected_cleared */ spec[i].expected_cleared,
+ /* expected_serialized */ spec[i].expected_serialized,
+ /* expected_destroyed */ spec[i].expected_destroyed
+ );
+ i++;
+ }
+ }
+
+ return;
+
+} /* check_flush_cache__single_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__single_entry_test()
+ *
+ * Purpose: Run a single entry flush cache test.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/12/05
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__single_entry_test(H5C2_t * cache_ptr,
+                                     int test_num,
+                                     int entry_type,
+                                     int entry_idx,
+                                     hbool_t insert_flag,
+                                     hbool_t dirty_flag,
+                                     unsigned int flags,
+                                     unsigned int flush_flags,
+                                     hbool_t expected_deserialized,
+                                     hbool_t expected_cleared,
+                                     hbool_t expected_serialized,
+                                     hbool_t expected_destroyed)
+{
+    /* const char * fcn_name = "check_flush_cache__single_entry_test"; */
+    static char msg[128];
+    herr_t result;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr;
+
+    /* Sanity check the parameters.  On any failure, set the global pass2
+     * to FALSE and point failure_mssg2 at a message (in the static buffer
+     * above) identifying the failing test number.
+     */
+    if ( cache_ptr == NULL ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "cache_ptr NULL on entry to single entry test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+    }
+    else if ( ( cache_ptr->index_len != 0 ) ||
+              ( cache_ptr->index_size != 0 ) ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "cache not empty at beginning of single entry test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+    }
+    else if ( ( entry_type < 0 ) || ( entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+              ( entry_idx < 0 ) || ( entry_idx > max_indices2[entry_type] ) ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "Bad parameters on entry to single entry test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+    }
+
+    /* Get the target entry into the cache -- either by inserting it
+     * directly (insert_flag TRUE), or by protecting and then unprotecting
+     * it, passing dirty_flag and the caller's flags on the unprotect.
+     */
+    if ( pass2 ) {
+
+        base_addr = entries2[entry_type];
+        entry_ptr = &(base_addr[entry_idx]);
+
+        if ( insert_flag ) {
+
+            insert_entry2(cache_ptr, entry_type, entry_idx, dirty_flag, flags);
+
+        } else {
+
+            protect_entry2(cache_ptr, entry_type, entry_idx);
+
+            unprotect_entry2(cache_ptr, entry_type, entry_idx,
+                             (int)dirty_flag, flags);
+        }
+    }
+
+    /* Flush the cache with the supplied flush flags, then verify both the
+     * entry's recorded history (deserialized / cleared / serialized /
+     * destroyed) and the resulting cache index length and size.
+     */
+    if ( pass2 ) {
+
+        result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                  flush_flags);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                "flush with flags 0x%x failed in single entry test #%d.",
+                flush_flags, test_num);
+            failure_mssg2 = msg;
+        }
+#ifndef NDEBUG
+        /* The clear_dirty_bits() callback is only called in debug mode --
+         * thus we can only do our full test on the expected entry history
+         * when debug is enabled.
+         */
+        else if ( ( entry_ptr->deserialized != expected_deserialized ) ||
+                  ( entry_ptr->cleared != expected_cleared ) ||
+                  ( entry_ptr->serialized != expected_serialized ) ||
+                  ( entry_ptr->destroyed != expected_destroyed ) ) {
+#else
+        /* When in production mode, the clear_dirty_bits() callback is
+         * not called, so entry_ptr->cleared should never be set.
+         */
+        else if ( ( entry_ptr->deserialized != expected_deserialized ) ||
+                  ( entry_ptr->cleared ) ||
+                  ( entry_ptr->serialized != expected_serialized ) ||
+                  ( entry_ptr->destroyed != expected_destroyed ) ) {
+#endif /* NDEBUG */
+#if 1 /* This is useful debugging code -- lets keep it for a while */
+
+            HDfprintf(stdout,
+                "desrlzd = %d(%d), clrd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n",
+                (int)(entry_ptr->deserialized),
+                (int)expected_deserialized,
+                (int)(entry_ptr->cleared),
+                (int)expected_cleared,
+                (int)(entry_ptr->serialized),
+                (int)expected_serialized,
+                (int)(entry_ptr->destroyed),
+                (int)expected_destroyed);
+#endif
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+               "Unexpected entry status after flush in single entry test #%d.",
+               test_num);
+            failure_mssg2 = msg;
+        }
+        /* A flush without H5C2__FLUSH_INVALIDATE_FLAG should leave the one
+         * entry resident; with the invalidate flag the cache must be empty.
+         */
+        else if ( ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) == 0 )
+                    &&
+                    ( ( cache_ptr->index_len != 1 )
+                      ||
+                      ( cache_ptr->index_size != entry_sizes2[entry_type] )
+                    )
+                  )
+                  ||
+                  ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 )
+                    &&
+                    ( ( cache_ptr->index_len != 0 )
+                      ||
+                      ( cache_ptr->index_size != 0 )
+                    )
+                  )
+                ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+              "Unexpected cache len/size after flush in single entry test #%d.",
+              test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+
+    /* clean up the cache to prep for the next test */
+    if ( pass2 ) {
+
+        result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                  H5C2__FLUSH_INVALIDATE_FLAG);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "Flush failed on cleanup in single entry test #%d.",
+                       test_num);
+            failure_mssg2 = msg;
+        }
+        else if ( ( cache_ptr->index_len != 0 ) ||
+                  ( cache_ptr->index_size != 0 ) ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+            "Unexpected cache len/size after cleanup in single entry test #%d.",
+            test_num);
+            failure_mssg2 = msg;
+
+        } else {
+
+            /* reset the entry's recorded history so the entry can be
+             * reused by subsequent tests.
+             */
+            entry_ptr->deserialized = FALSE;
+            entry_ptr->cleared = FALSE;
+            entry_ptr->serialized = FALSE;
+            entry_ptr->destroyed = FALSE;
+        }
+    }
+
+    return;
+
+} /* check_flush_cache__single_entry_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_cache__pinned_single_entry_test()
+ *
+ * Purpose: Run a pinned single entry flush cache test.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/28/06
+ *
+ * Modifications:
+ *
+ * JRM -- 5/17/06
+ * Added the pop_mark_dirty_prot and pop_mark_dirty_pinned
+ * flags and supporting code to allow us to test the
+ * H5C2_mark_pinned_or_protected_entry_dirty() call. Use the
+ * call to mark the entry dirty while the entry is protected
+ * if pop_mark_dirty_prot is TRUE, and to mark the entry
+ * dirty while it is pinned if pop_mark_dirty_pinned is TRUE.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_cache__pinned_single_entry_test(H5C2_t * cache_ptr,
+                                            int test_num,
+                                            int entry_type,
+                                            int entry_idx,
+                                            hbool_t dirty_flag,
+                                            hbool_t mark_dirty,
+                                            hbool_t pop_mark_dirty_prot,
+                                            hbool_t pop_mark_dirty_pinned,
+                                            hbool_t unprotect_unpin,
+                                            unsigned int flags,
+                                            unsigned int flush_flags,
+                                            hbool_t expected_cleared,
+                                            hbool_t expected_serialized,
+                                            hbool_t expected_destroyed)
+{
+    /* const char *fcn_name = "check_flush_cache__pinned_single_entry_test"; */
+    static char msg[128];
+    /* the entry is always brought into the cache via protect_entry2()
+     * below, so it must always show up as deserialized.
+     */
+    hbool_t expected_deserialized = TRUE;
+    herr_t result;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr;
+
+    /* Sanity check the parameters.  On any failure, set the global pass2
+     * to FALSE and point failure_mssg2 at a message (in the static buffer
+     * above) identifying the failing test number.
+     */
+    if ( cache_ptr == NULL ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "cache_ptr NULL on entry to pinned single entry test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+    }
+    else if ( ( cache_ptr->index_len != 0 ) ||
+              ( cache_ptr->index_size != 0 ) ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+              "cache not empty at beginning of pinned single entry test #%d.",
+              test_num);
+        failure_mssg2 = msg;
+    }
+    else if ( ( entry_type < 0 ) || ( entry_type >= NUMBER_OF_ENTRY_TYPES ) ||
+              ( entry_idx < 0 ) || ( entry_idx > max_indices2[entry_type] ) ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "Bad parameters on entry to pinned single entry test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+    }
+
+    /* Load the target entry and leave it pinned:  protect it, optionally
+     * mark it dirty while still protected (pop_mark_dirty_prot), unprotect
+     * with the pin flag added, then optionally dirty it via the pinned
+     * entry call (mark_dirty) and/or the pinned-or-protected call
+     * (pop_mark_dirty_pinned).
+     */
+    if ( pass2 ) {
+
+        base_addr = entries2[entry_type];
+        entry_ptr = &(base_addr[entry_idx]);
+
+        protect_entry2(cache_ptr, entry_type, entry_idx);
+
+        if ( pop_mark_dirty_prot ) {
+
+            mark_pinned_or_protected_entry_dirty2(cache_ptr,
+                                                  entry_type,
+                                                  entry_idx);
+        }
+
+        unprotect_entry2(cache_ptr, entry_type, entry_idx,
+                         (int)dirty_flag, (flags | H5C2__PIN_ENTRY_FLAG));
+
+        if ( mark_dirty ) {
+
+            mark_pinned_entry_dirty2(cache_ptr, entry_type, entry_idx,
+                                     FALSE, (size_t)0);
+        }
+
+        if ( pop_mark_dirty_pinned ) {
+
+            mark_pinned_or_protected_entry_dirty2(cache_ptr,
+                                                  entry_type,
+                                                  entry_idx);
+        }
+    }
+
+    /* Flush the cache with the supplied flush flags, then verify both the
+     * entry's recorded history (deserialized / cleared / serialized /
+     * destroyed) and the resulting cache index length and size.
+     */
+    if ( pass2 ) {
+
+        result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                  flush_flags);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+               "flush with flags 0x%x failed in pinned single entry test #%d.",
+               flush_flags, test_num);
+            failure_mssg2 = msg;
+        }
+#ifndef NDEBUG
+        /* The clear_dirty_bits() callback is only called in debug mode --
+         * thus we can only do our full test on the expected entry history
+         * when debug is enabled.
+         */
+        else if ( ( entry_ptr->deserialized != expected_deserialized ) ||
+                  ( entry_ptr->cleared != expected_cleared ) ||
+                  ( entry_ptr->serialized != expected_serialized ) ||
+                  ( entry_ptr->destroyed != expected_destroyed ) ) {
+#else
+        /* When in production mode, the clear_dirty_bits() callback is
+         * not called, so entry_ptr->cleared should never be set.
+         */
+        else if ( ( entry_ptr->deserialized != expected_deserialized ) ||
+                  ( entry_ptr->cleared ) ||
+                  ( entry_ptr->serialized != expected_serialized ) ||
+                  ( entry_ptr->destroyed != expected_destroyed ) ) {
+#endif
+#if 0 /* this is useful debugging code -- keep it around */
+        HDfprintf(stdout,
+          "desrlzd = %d(%d), clrd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n",
+          (int)(entry_ptr->deserialized),
+          (int)expected_deserialized,
+          (int)(entry_ptr->cleared),
+          (int)expected_cleared,
+          (int)(entry_ptr->serialized),
+          (int)expected_serialized,
+          (int)(entry_ptr->destroyed),
+          (int)expected_destroyed);
+#endif
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+        "Unexpected entry status after flush in pinned single entry test #%d.",
+        test_num);
+            failure_mssg2 = msg;
+        }
+        /* A flush without H5C2__FLUSH_INVALIDATE_FLAG should leave the one
+         * entry resident; with the invalidate flag the cache must be empty.
+         */
+        else if ( ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) == 0 )
+                    &&
+                    ( ( cache_ptr->index_len != 1 )
+                      ||
+                      ( cache_ptr->index_size != entry_sizes2[entry_type] )
+                    )
+                  )
+                  ||
+                  ( ( (flush_flags & H5C2__FLUSH_INVALIDATE_FLAG) != 0 )
+                    &&
+                    ( ( cache_ptr->index_len != 0 )
+                      ||
+                      ( cache_ptr->index_size != 0 )
+                    )
+                  )
+                ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+      "Unexpected cache len/size after flush in pinned single entry test #%d.",
+      test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+
+    /* clean up the cache to prep for the next test -- first unpin the
+     * entry, either via a protect / unprotect-with-unpin cycle
+     * (unprotect_unpin TRUE) or via a direct unpin call.
+     */
+    if ( pass2 ) {
+
+        if ( unprotect_unpin ) {
+
+            protect_entry2(cache_ptr, entry_type, entry_idx);
+
+            unprotect_entry2(cache_ptr, entry_type, entry_idx,
+                             (int)dirty_flag, H5C2__UNPIN_ENTRY_FLAG);
+
+        } else {
+
+            unpin_entry2(cache_ptr, entry_type, entry_idx);
+
+        }
+    }
+
+    /* ... then flush with invalidate to empty the cache. */
+    if ( pass2 ) {
+
+        result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                  H5C2__FLUSH_INVALIDATE_FLAG);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "Flush failed on cleanup in pinned single entry test #%d.",
+                       test_num);
+            failure_mssg2 = msg;
+        }
+        else if ( ( cache_ptr->index_len != 0 ) ||
+                  ( cache_ptr->index_size != 0 ) ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+    "Unexpected cache len/size after cleanup in pinned single entry test #%d.",
+    test_num);
+            failure_mssg2 = msg;
+
+        } else {
+
+            /* reset the entry's recorded history so the entry can be
+             * reused by subsequent tests.
+             */
+            entry_ptr->deserialized = FALSE;
+            entry_ptr->cleared = FALSE;
+            entry_ptr->serialized = FALSE;
+            entry_ptr->destroyed = FALSE;
+        }
+    }
+
+    return;
+
+} /* check_flush_cache__pinned_single_entry_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_get_entry_status()
+ *
+ * Purpose: Verify that H5C2_get_entry_status() behaves as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/28/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_get_entry_status(void)
+{
+    const char * fcn_name = "check_get_entry_status";
+    static char msg[128];
+    herr_t result;
+    hbool_t in_cache;
+    hbool_t is_dirty;
+    hbool_t is_protected;
+    hbool_t is_pinned;
+    size_t entry_size;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr;
+
+    TESTING("H5C2_get_entry_status() functionality");
+
+    pass2 = TRUE;
+
+    /* Set up a fresh cache and select entry (0, 0) as the test target. */
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+                                 (size_t)(1 * 1024 * 1024));
+
+        base_addr = entries2[0];
+        entry_ptr = &(base_addr[0]);
+    }
+
+    /* 1) entry not in cache yet -- expect in_cache FALSE. */
+    if ( pass2 ) {
+
+        /* entry not in cache -- only in_cache should be touched by
+         * the call.  Thus, only check that boolean.
+         */
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 1.");
+            failure_mssg2 = msg;
+
+        } else if ( in_cache ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 1.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* 2) load the entry via a clean protect / unprotect cycle -- expect
+     *    in cache, and neither dirty, protected, nor pinned.
+     */
+    protect_entry2(cache_ptr, 0, 0);
+
+    unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+    if ( pass2 ) {
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 2.");
+            failure_mssg2 = msg;
+
+        } else if ( !in_cache || is_dirty || is_protected || is_pinned ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 2.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* 3) protect the entry -- expect is_protected TRUE. */
+    protect_entry2(cache_ptr, 0, 0);
+
+    if ( pass2 ) {
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 3.");
+            failure_mssg2 = msg;
+
+        } else if ( !in_cache || is_dirty || !is_protected || is_pinned ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 3.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* 4) unprotect with the pin flag -- expect is_pinned TRUE. */
+    unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+    if ( pass2 ) {
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 4.");
+            failure_mssg2 = msg;
+
+        } else if ( !in_cache || is_dirty || is_protected || !is_pinned ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 4.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* 5) mark the pinned entry dirty -- expect is_dirty TRUE as well. */
+    mark_pinned_entry_dirty2(cache_ptr, 0, 0, FALSE, (size_t)0);
+
+    if ( pass2 ) {
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 5.");
+            failure_mssg2 = msg;
+
+        } else if ( !in_cache || !is_dirty || is_protected || !is_pinned ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 5.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* 6) unpin the entry -- expect is_pinned FALSE, still dirty. */
+    unpin_entry2(cache_ptr, 0, 0);
+
+    if ( pass2 ) {
+
+        result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+                          &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_get_entry_status() reports failure 6.");
+            failure_mssg2 = msg;
+
+        } else if ( !in_cache || !is_dirty || is_protected || is_pinned ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128, "Unexpected status 6.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_get_entry_status() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_expunge_entry()
+ *
+ * Purpose: Verify that H5C2_expunge_entry() behaves as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/5/06
+ *
+ * Modifications:
+ *
+ * JRM -- 10/15/07
+ * Minor updates to conform to new cache API.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_expunge_entry(void)
+{
+ const char * fcn_name = "check_expunge_entry";
+ static char msg[128];
+ herr_t result;
+ hbool_t in_cache;
+ hbool_t is_dirty;
+ hbool_t is_protected;
+ hbool_t is_pinned;
+ size_t entry_size;
+ H5C2_t * cache_ptr = NULL;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ TESTING("H5C2_expunge_entry() functionality");
+
+ pass2 = TRUE;
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+ (size_t)(1 * 1024 * 1024));
+
+ base_addr = entries2[0];
+ entry_ptr = &(base_addr[0]);
+ }
+
+ if ( pass2 ) {
+
+ /* entry not in cache -- only in_cache should be touched by
+ * the status call. Thus, only check that boolean.
+ */
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 1.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 1.");
+ failure_mssg2 = msg;
+
+ } else if ( ( entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ /* protect an entry to force the cache to load it, and then unprotect
+ * it without marking it dirty.
+ */
+
+ protect_entry2(cache_ptr, 0, 0);
+
+ unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 2.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || is_dirty || is_protected || is_pinned ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 2.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ /* Expunge the entry and then verify that it is no longer in the cache.
+ * Also verify that the entry was loaded, cleared, and destroyed, but
+ * not flushed.
+ *
+ * JRM -- 10/15/07
+ * With the advent of the new cache API, the old clear() callback has
+ * been replaced with the new clear_dirty_bits() callback. This
+ * callback is only called if the entry is dirty to begin with.
+ * Thus, the entry will no longer be marked as cleared.
+ */
+ expunge_entry2(cache_ptr, 0, 0);
+
+ if ( pass2 ) {
+
+ /* entry shouldn't be in cache -- only in_cache should be touched
+ * by the status call. Thus, only check that boolean.
+ */
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 3.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 3.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ /* now repeat the process with a different entry. On unprotect
+ * mark the entry as dirty. Verify that it is not flushed.
+ */
+
+ base_addr = entries2[0];
+ entry_ptr = &(base_addr[1]);
+
+ if ( pass2 ) {
+
+ /* entry not in cache -- only in_cache should be touched by
+ * the status call. Thus, only check that boolean.
+ */
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 4.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 4.");
+ failure_mssg2 = msg;
+
+ } else if ( ( entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ /* protect the entry to force the cache to load it, and then unprotect
+ * it with the dirty flag set.
+ */
+
+ protect_entry2(cache_ptr, 0, 1);
+
+ unprotect_entry2(cache_ptr, 0, 1, TRUE, H5C2__NO_FLAGS_SET);
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 5.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || is_pinned ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 5.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 5.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ /* Expunge the entry and then verify that it is no longer in the cache.
+ * Also verify that the entry was loaded, cleared and destroyed, but not
+ * flushed.
+ */
+ expunge_entry2(cache_ptr, 0, 1);
+
+ if ( pass2 ) {
+
+ /* entry shouldn't be in cache -- only in_cache should be touched
+ * by the status call. Thus, only check that boolean.
+ */
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 6.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 6.");
+ failure_mssg2 = msg;
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( ! entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#else
+ /* When in production mode, the clear_dirty_bits() callback is
+ * not called, so entry_ptr->cleared should never be set.
+ */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#endif /* NDEBUG */
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 6.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_expunge_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_multiple_read_protect()
+ *
+ * Purpose: Verify that multiple, simultaneous read protects of a
+ * single entry perform as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/1/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+
+static void
+check_multiple_read_protect(void)
+{
+    const char * fcn_name = "check_multiple_read_protect()";
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("multiple read only protects on a single entry");
+
+    pass2 = TRUE;
+
+    /* allocate a cache.  Should succeed.
+     *
+     * Then to start with, proceed as follows:
+     *
+     * Read protect an entry.
+     *
+     * Then read protect the entry again.  Should succeed.
+     *
+     * Read protect yet again.  Should succeed.
+     *
+     * Unprotect with no changes, and then read protect twice again.
+     * Should succeed.
+     *
+     * Now unprotect three times.  Should succeed.
+     *
+     * If stats are enabled, verify that correct stats are collected at
+     * every step.
+     *
+     * Also, verify internal state of read protects at every step.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        entry_ptr = &((entries2[0])[0]);
+
+        if ( ( entry_ptr->header.is_protected ) ||
+             ( entry_ptr->header.is_read_only ) ||
+             ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 1.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 0 ) ||
+         ( cache_ptr->max_read_protects[0] != 0 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 1.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 2.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 1 ) ||
+         ( cache_ptr->max_read_protects[0] != 1 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 2.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 3.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 2 ) ||
+         ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 3.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 4.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 2 ) ||
+         ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 4.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 5.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 3 ) ||
+         ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 5.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 3 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 6.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 6.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 7.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 7.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( ! ( entry_ptr->header.is_read_only ) ) ||
+             ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 8.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 8.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        if ( ( entry_ptr->header.is_protected ) ||
+             ( entry_ptr->header.is_read_only ) ||
+             ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 9.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 0 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 9.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+
+    /* If we get this far, do a write protect and unprotect to verify
+     * that the stats are getting collected properly here as well.
+     */
+
+    if ( pass2 )
+    {
+        protect_entry2(cache_ptr, 0, 0);
+
+        if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+             ( entry_ptr->header.is_read_only ) ||
+             ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 10.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 1 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 10.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+    if ( pass2 )
+    {
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        if ( ( entry_ptr->header.is_protected ) ||
+             ( entry_ptr->header.is_read_only ) ||
+             ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected ro protected status 11.\n";
+        }
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    if ( ( cache_ptr->write_protects[0] != 1 ) ||
+         ( cache_ptr->read_protects[0] != 4 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 11.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+
+    /* Finally, mix things up a little, using a mix of reads and
+     * and writes on different entries.  Also include a pin to verify
+     * that it works as well.
+     *
+     * Stats are looking OK, so we will only test them one more time
+     * at the end to ensure that all is at it should be.
+     */
+
+    if ( pass2 ) {
+
+        protect_entry2(cache_ptr, 0, 2);         /* (0,2) write */
+        protect_entry_ro2(cache_ptr, 0, 4);      /* (0,4) read only (1) */
+        protect_entry2(cache_ptr, 0, 6);         /* (0,6) write */
+
+        unprotect_entry2(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */
+                         H5C2__NO_FLAGS_SET);
+
+        protect_entry_ro2(cache_ptr, 0, 2);      /* (0,2) read only (1) */
+        protect_entry2(cache_ptr, 0, 1);         /* (0,1) write */
+        protect_entry_ro2(cache_ptr, 0, 4);      /* (0,4) read only (2) */
+        protect_entry2(cache_ptr, 0, 0);         /* (0,0) write */
+        protect_entry_ro2(cache_ptr, 0, 2);      /* (0,2) read only (2) */
+
+        unprotect_entry2(cache_ptr, 0, 2, FALSE, /* (0,2) read only (1) pin */
+                         H5C2__PIN_ENTRY_FLAG);
+        unprotect_entry2(cache_ptr, 0, 6, FALSE, /* (0,6) unprotect */
+                         H5C2__NO_FLAGS_SET);
+
+        protect_entry_ro2(cache_ptr, 0, 4);      /* (0,4) read only (3) */
+
+        unprotect_entry2(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */
+                         H5C2__NO_FLAGS_SET);
+        unprotect_entry2(cache_ptr, 0, 1, FALSE, /* (0,1) unprotect */
+                         H5C2__NO_FLAGS_SET);
+
+        if ( pass2 ) {
+
+            entry_ptr = &((entries2[0])[4]);
+
+            if ( H5C2_pin_protected_entry(cache_ptr, (void *)entry_ptr) < 0 ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "H5C2_pin_protected_entry() failed.\n";
+
+            } else if ( ! (entry_ptr->header.is_pinned) ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "entry (0,4) not pinned.\n";
+
+            } else {
+
+                /* keep test bed sanity checks happy */
+                entry_ptr->is_pinned = TRUE;
+
+            }
+        }
+
+        unprotect_entry2(cache_ptr, 0, 4, FALSE, /* (0,4) read only (2) */
+                         H5C2__NO_FLAGS_SET);
+        unprotect_entry2(cache_ptr, 0, 4, FALSE, /* (0,4) read only (1) */
+                         H5C2__UNPIN_ENTRY_FLAG);
+
+        if ( ( pass2 ) && ( entry_ptr->header.is_pinned ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "entry (0,4) still pinned.\n";
+
+        }
+
+        unprotect_entry2(cache_ptr, 0, 4, FALSE, /* (0,4) unprotect */
+                         H5C2__NO_FLAGS_SET);
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, /* (0,0) unprotect */
+                         H5C2__NO_FLAGS_SET);
+
+        unpin_entry2(cache_ptr, 0, 2);
+    }
+
+#if H5C2_COLLECT_CACHE_STATS
+    /* Final tally:  4 more write protects ((0,2), (0,6), (0,1), (0,0))
+     * on top of the 1 above = 5, and 5 more read protects ((0,4) x 3,
+     * (0,2) x 2) on top of the 4 above = 9.  The maximum simultaneous
+     * read protect count on (0,4) is again 3.
+     */
+    if ( ( cache_ptr->write_protects[0] != 5 ) ||
+         ( cache_ptr->read_protects[0] != 9 ) ||
+         ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpected protect stats 12.\n";
+    }
+#endif /* H5C2_COLLECT_CACHE_STATS */
+
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_multiple_read_protect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_rename_entry()
+ *
+ * Purpose: Verify that H5C2_rename_entry behaves as expected. In
+ * particular, verify that it works correctly with pinned
+ * entries.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/26/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_rename_entry(void)
+{
+    const char * fcn_name = "check_rename_entry";
+    int idx;
+    H5C2_t * cache_ptr = NULL;
+    struct rename_entry_test_spec test_specs[4] =
+    {
+        { /* entry_type  */ PICO_ENTRY_TYPE,
+          /* entry_index */ 10,
+          /* is_dirty    */ FALSE,
+          /* is_pinned   */ FALSE
+        },
+        { /* entry_type  */ PICO_ENTRY_TYPE,
+          /* entry_index */ 20,
+          /* is_dirty    */ TRUE,
+          /* is_pinned   */ FALSE
+        },
+        { /* entry_type  */ PICO_ENTRY_TYPE,
+          /* entry_index */ 30,
+          /* is_dirty    */ FALSE,
+          /* is_pinned   */ TRUE
+        },
+        { /* entry_type  */ PICO_ENTRY_TYPE,
+          /* entry_index */ 40,
+          /* is_dirty    */ TRUE,
+          /* is_pinned   */ TRUE
+        }
+    };
+
+    TESTING("H5C2_rename_entry() functionality");
+
+    pass2 = TRUE;
+
+    /* Set up a cache and then run one rename test per entry in
+     * test_specs[], covering the four combinations of dirty / clean
+     * and pinned / unpinned on an unprotected entry:
+     *
+     *   1) clean, unpinned       2) dirty, unpinned
+     *   3) clean, pinned         4) dirty, pinned
+     *
+     * In every case the entry must end up at its new location and be
+     * marked dirty (if it wasn't dirty already).  Unpinned entries
+     * must move to the head of the LRU list, while pinned entries
+     * must stay on the pinned entry list.  The per-case verification
+     * is done by check_rename_entry__run_test().
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+                                 (size_t)(1 * 1024 * 1024));
+    }
+
+    for ( idx = 0; ( pass2 ) && ( idx < 4 ); idx++ )
+    {
+        check_rename_entry__run_test(cache_ptr, idx, &(test_specs[idx]));
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_rename_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_rename_entry__run_test()
+ *
+ * Purpose: Run a rename entry test.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/27/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_rename_entry__run_test(H5C2_t * cache_ptr,
+                             int test_num,
+                             struct rename_entry_test_spec * spec_ptr)
+{
+    /* const char * fcn_name = "check_rename_entry__run_test"; */
+    static char msg[128];
+    unsigned int flags = H5C2__NO_FLAGS_SET;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr = NULL;
+    H5C2_cache_entry_t * test_ptr = NULL;
+
+    /* Sanity check the arguments -- a NULL cache or spec pointer fails
+     * the test before any cache calls are made.
+     */
+    if ( cache_ptr == NULL ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "cache_ptr NULL on entry to rename test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+
+    } else if ( spec_ptr == NULL ) {
+
+        pass2 = FALSE;
+        HDsnprintf(msg, (size_t)128,
+                   "spec_ptr NULL on entry to rename test #%d.",
+                   test_num);
+        failure_mssg2 = msg;
+
+    }
+
+    /* Verify that the target test entry is in its pristine state
+     * (consistent self pointer, at its main address), then translate
+     * the spec's is_pinned field into an unprotect flag.
+     */
+    if ( pass2 ) {
+
+        base_addr = entries2[spec_ptr->entry_type];
+        entry_ptr = &(base_addr[spec_ptr->entry_index]);
+
+        if ( ( entry_ptr->self != entry_ptr ) ||
+             ( ( entry_ptr->cache_ptr != cache_ptr ) &&
+               ( entry_ptr->cache_ptr != NULL ) ) ||
+             ( ! ( entry_ptr->at_main_addr ) ) ||
+             ( entry_ptr->addr != entry_ptr->main_addr ) ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "bad entry_ptr in rename test #%d.",
+                       test_num);
+            failure_mssg2 = msg;
+
+        } else if ( spec_ptr->is_pinned ) {
+
+            flags |= H5C2__PIN_ENTRY_FLAG;
+        }
+    }
+
+    /* Load the entry via a protect, unprotect it with the spec's dirty
+     * flag (and pin flag if requested), and then rename it.
+     *
+     * NOTE(review): these helpers are called even when pass2 is FALSE --
+     * presumably they are no-ops on failure; confirm against the test
+     * bed implementation.
+     */
+    protect_entry2(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index);
+
+    unprotect_entry2(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index,
+                     (int)(spec_ptr->is_dirty), flags);
+
+    rename_entry2(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index, FALSE);
+
+    if ( pass2 ) {
+
+        /* verify that the rename took place, and that the cache's internal
+         * structures are as expected.  Note that some sanity checking is
+         * done by rename_entry2(), so we don't have to repeat it here.
+         */
+
+        if ( spec_ptr->is_pinned ) {
+
+            if ( ! ( entry_ptr->header.is_pinned ) ) {
+
+                pass2 = FALSE;
+                HDsnprintf(msg, (size_t)128,
+                           "Pinned entry not pinned after rename in test #%d.",
+                           test_num);
+                failure_mssg2 = msg;
+            }
+
+            if ( pass2 ) {
+
+                /* walk the pinned entry list looking for the entry --
+                 * a pinned entry must remain on that list after a rename.
+                 */
+                test_ptr = cache_ptr->pel_head_ptr;
+
+                while ( ( test_ptr != NULL ) &&
+                        ( test_ptr != (H5C2_cache_entry_t *)entry_ptr ) )
+                {
+                    test_ptr = test_ptr->next;
+                }
+
+                if ( test_ptr == NULL ) {
+
+                    pass2 = FALSE;
+                    HDsnprintf(msg, (size_t)128,
+                           "Pinned entry not in pel after rename in test #%d.",
+                           test_num);
+                    failure_mssg2 = msg;
+                }
+            }
+
+            /* undo the pin applied at unprotect time */
+            unpin_entry2(cache_ptr, spec_ptr->entry_type,
+                         spec_ptr->entry_index);
+
+        } else {
+
+            if ( entry_ptr->header.is_pinned ) {
+
+                pass2 = FALSE;
+                HDsnprintf(msg, (size_t)128,
+                           "Unpinned entry pinned after rename in test #%d.",
+                           test_num);
+                failure_mssg2 = msg;
+            }
+
+            /* an unpinned renamed entry must be at the head of the LRU --
+             * i.e. it is the LRU head and has no predecessor.
+             */
+            if ( ( entry_ptr->header.prev != NULL ) ||
+                 ( cache_ptr->LRU_head_ptr != (H5C2_cache_entry_t *)entry_ptr ) )
+            {
+                pass2 = FALSE;
+                HDsnprintf(msg, (size_t)128,
+                           "Entry not at head of LRU after rename in test #%d.",
+                           test_num);
+                failure_mssg2 = msg;
+            }
+        }
+    }
+
+    /* put the entry back where it started from */
+    rename_entry2(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index, TRUE);
+
+    return;
+
+} /* check_rename_entry__run_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_pin_protected_entry()
+ *
+ * Purpose: Verify that H5C2_pin_protected_entry behaves as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/28/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_pin_protected_entry(void)
+{
+    const char * fcn_name = "check_pin_protected_entry";
+    static char msg[128];
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr;
+
+    TESTING("H5C2_pin_protected_entry() functionality");
+
+    pass2 = TRUE;
+
+    /* Strategy: build a cache, protect entry (0,0), pin it via
+     * H5C2_pin_protected_entry(), and confirm that the pin took.
+     * Then unprotect with the unpin flag set, and tear the cache down.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+                                 (size_t)(1 * 1024 * 1024));
+    }
+
+    protect_entry2(cache_ptr, 0, 0);
+
+    if ( pass2 ) {
+
+        base_addr = entries2[0];
+        entry_ptr = &(base_addr[0]);
+
+        result = H5C2_pin_protected_entry(cache_ptr, (void *)entry_ptr);
+
+        if ( result >= 0 ) {
+
+            if ( entry_ptr->header.is_pinned ) {
+
+                /* keep the test bed's bookkeeping in sync with the
+                 * actual cache state.
+                 */
+                entry_ptr->is_pinned = TRUE;
+
+            } else {
+
+                pass2 = FALSE;
+                HDsnprintf(msg, (size_t)128, "entry not pinned when it should be.");
+                failure_mssg2 = msg;
+            }
+
+        } else {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, (size_t)128,
+                       "H5C2_pin_protected_entry() reports failure.");
+            failure_mssg2 = msg;
+        }
+    }
+
+    unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__UNPIN_ENTRY_FLAG);
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_pin_protected_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_resize_entry()
+ *
+ * Purpose: Verify that H5C2_resize_entry() and H5C2_unprotect() resize
+ * entries as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/7/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_resize_entry(void)
+{
+ const char * fcn_name = "check_resize_entry";
+ static char msg[128];
+ herr_t result;
+ hbool_t in_cache;
+ hbool_t is_dirty;
+ hbool_t is_protected;
+ hbool_t is_pinned;
+ size_t entry_size;
+ size_t reported_entry_size;
+ H5C2_t * cache_ptr = NULL;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ TESTING("entry resize functionality");
+
+ /* Setup a cache and verify that it is empty.
+ *
+ * Then force the load of an entry by protecting it, and verify that
+ * the entry and cache have the expected sizes.
+ *
+ * Then unprotect the entry with the size changed flag and a reduced
+ * size. Verify that the entry and cache have the expected
+ * sizes.
+ *
+ * Use a second protect/unprotect cycle to restore the entry to
+ * its original size. Verify that the entry and cache have the
+ * expected sizes.
+ *
+ * Protect and unprotect the entry again to pin it. Use
+ * H5C2_resize_entry to reduce its size. Verify that the entry
+ * and cache have the expected sizes.
+ *
+ * Use H5C2_resize_entry again to restore the entry to its original
+ * size. Verify that the entry and cache have the expected sizes.
+ *
+ * Use a protect / unprotect cycle to unpin and destroy the entry.
+ * Verify that the entry and cache have the expected sizes.
+ *
+ *
+ * Observe that all the above tests have been done with only one
+ * entry in the cache. Repeat the tests with several entries in
+ * the cache.
+ */
+
+ pass2 = TRUE;
+
+ /* tests with only one entry in the cache: */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024 * 1024),
+ (size_t)(1 * 1024 * 1024));
+
+ base_addr = entries2[LARGE_ENTRY_TYPE];
+ entry_ptr = &(base_addr[0]);
+ entry_size = LARGE_ENTRY_SIZE;
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 1 ) ||
+ ( cache_ptr->index_size != LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 1.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || is_dirty || !is_protected || is_pinned ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 1.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[LARGE_ENTRY_TYPE]), entry_ptr->addr,
+ (void *)entry_ptr,
+ H5C2__SIZE_CHANGED_FLAG | H5C2__DIRTIED_FLAG,
+ (LARGE_ENTRY_SIZE / 2));
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "H5C2_unprotect() reports failure 1.");
+ failure_mssg2 = msg;
+
+ } else {
+
+ /* tidy up so we play nice with the standard protect / unprotect
+ * calls.
+ */
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_dirty = TRUE;
+ entry_ptr->size = LARGE_ENTRY_SIZE / 2;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 1 ) ||
+ ( cache_ptr->index_size != (LARGE_ENTRY_SIZE / 2) ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != (LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 2.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || is_pinned ||
+ ( reported_entry_size != (LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 2.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+
+ result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[LARGE_ENTRY_TYPE]), entry_ptr->addr,
+ (void *)entry_ptr,
+ (H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG),
+ LARGE_ENTRY_SIZE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "H5C2_unprotect() reports failure 2.");
+ failure_mssg2 = msg;
+
+ } else {
+
+ /* tidy up so we play nice with the standard protect / unprotect
+ * calls.
+ */
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_dirty = TRUE;
+ entry_ptr->size = LARGE_ENTRY_SIZE;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 1 ) ||
+ ( cache_ptr->index_size != LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 3.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || is_pinned ||
+ ( reported_entry_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 3.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+ if ( pass2 ) {
+
+ result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr,
+ (LARGE_ENTRY_SIZE / 4));
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5C2_resize_pinned_entry() reports failure 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 1 ) ||
+ ( cache_ptr->index_size != (LARGE_ENTRY_SIZE / 4) ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != (LARGE_ENTRY_SIZE / 4) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 5.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 4.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || ! is_pinned ||
+ ( reported_entry_size != (LARGE_ENTRY_SIZE / 4) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 4.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr,
+ LARGE_ENTRY_SIZE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5C2_resize_pinned_entry() reports failure 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 1 ) ||
+ ( cache_ptr->index_size != LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 6.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 5.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || ! is_pinned ||
+ ( reported_entry_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 5.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 5.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0, FALSE,
+ H5C2__UNPIN_ENTRY_FLAG | H5C2__DELETED_FLAG);
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 6.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 6.");
+ failure_mssg2 = msg;
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( ! entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#else
+    /* When in production mode, the clear_dirty_bits() callback is
+     * not called, so entry_ptr->cleared should never be set.
+     */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#endif /* NDEBUG */
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 6.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 7.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+
+    /* now repeat the above tests with several entries in the cache: */
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 8.");
+ failure_mssg2 = msg;
+
+ }
+ base_addr = entries2[LARGE_ENTRY_TYPE];
+ entry_ptr = &(base_addr[3]);
+ entry_size = LARGE_ENTRY_SIZE;
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 1);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 1, TRUE, H5C2__NO_FLAGS_SET);
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 2);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 2, FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 3 ) ||
+ ( cache_ptr->index_size != 3 * LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != LARGE_ENTRY_SIZE ) ) {
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 9.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3);
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size != 4 * LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != LARGE_ENTRY_SIZE ) ) {
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 10.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 7.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || is_dirty || !is_protected || is_pinned ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 7.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 7.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[LARGE_ENTRY_TYPE]), entry_ptr->addr,
+ (void *)entry_ptr,
+ H5C2__SIZE_CHANGED_FLAG | H5C2__DIRTIED_FLAG,
+ (LARGE_ENTRY_SIZE / 2));
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "H5C2_unprotect() reports failure 3.");
+ failure_mssg2 = msg;
+
+ } else {
+
+ /* tidy up so we play nice with the standard protect / unprotect
+ * calls.
+ */
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_dirty = TRUE;
+ entry_ptr->size = LARGE_ENTRY_SIZE / 2;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size !=
+ ((3 * LARGE_ENTRY_SIZE) + (LARGE_ENTRY_SIZE / 2)) ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size !=
+ (LARGE_ENTRY_SIZE + (LARGE_ENTRY_SIZE / 2)) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 11.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 8.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || is_pinned ||
+ ( reported_entry_size != (LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 8.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 8.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3);
+
+ if ( pass2 ) {
+
+ result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[LARGE_ENTRY_TYPE]), entry_ptr->addr,
+ (void *)entry_ptr,
+ (H5C2__DIRTIED_FLAG | H5C2__SIZE_CHANGED_FLAG),
+ LARGE_ENTRY_SIZE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "H5C2_unprotect() reports failure 4.");
+ failure_mssg2 = msg;
+
+ } else {
+
+ /* tidy up so we play nice with the standard protect / unprotect
+ * calls.
+ */
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_dirty = TRUE;
+ entry_ptr->size = LARGE_ENTRY_SIZE;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size != 4 * LARGE_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != 2 * LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 12.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 9.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || is_pinned ||
+ ( reported_entry_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 9.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 9.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3);
+
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+ if ( pass2 ) {
+
+ result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr,
+ (LARGE_ENTRY_SIZE / 4));
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5C2_resize_pinned_entry() reports failure 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size !=
+ ((3 * LARGE_ENTRY_SIZE) + (LARGE_ENTRY_SIZE / 4)) ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size !=
+ (LARGE_ENTRY_SIZE + (LARGE_ENTRY_SIZE / 4)) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 13.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 10.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || ! is_pinned ||
+ ( reported_entry_size != (LARGE_ENTRY_SIZE / 4) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 10.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 10.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr,
+ LARGE_ENTRY_SIZE);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5C2_resize_pinned_entry() reports failure 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 4 ) ||
+ ( cache_ptr->index_size != (4 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != (2 * LARGE_ENTRY_SIZE) ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 14.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ &reported_entry_size, &in_cache,
+ &is_dirty, &is_protected, &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 11.");
+ failure_mssg2 = msg;
+
+ } else if ( !in_cache || !is_dirty || is_protected || ! is_pinned ||
+ ( reported_entry_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 11.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 11.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3);
+
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 3, FALSE,
+ H5C2__UNPIN_ENTRY_FLAG | H5C2__DELETED_FLAG);
+
+ if ( pass2 ) {
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr, &entry_size,
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 12.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 12.");
+ failure_mssg2 = msg;
+
+#ifndef NDEBUG
+ /* The clear_dirty_bits() callback is only called in debug mode --
+ * thus we can only do our full test on the expected entry history
+ * when debug is enabled.
+ */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( ! entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#else
+    /* When in production mode, the clear_dirty_bits() callback is
+     * not called, so entry_ptr->cleared should never be set.
+     */
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+#endif /* NDEBUG */
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 12.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 3 ) ||
+ ( cache_ptr->index_size != (3 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != LARGE_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 15.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 2);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 2, FALSE, H5C2__DELETED_FLAG);
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 1);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 1, FALSE, H5C2__DELETED_FLAG);
+
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0);
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, 0, FALSE, H5C2__DELETED_FLAG);
+
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 16.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_resize_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_evictions_enabled()
+ *
+ * Purpose: Verify that H5C2_get_evictions_enabled() and
+ * H5C2_set_evictions_enabled() functions perform as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/2/07
+ *
+ * Modifications:
+ *
+ * JRM -- 10/15/07
+ * Minor updates to adapt to cache API changes.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_evictions_enabled(void)
+{
+ const char * fcn_name = "check_evictions_enabled";
+ static char msg[128];
+ herr_t result;
+ hbool_t show_progress = FALSE;
+ hbool_t evictions_enabled;
+ hbool_t in_cache;
+ int i;
+ int mile_stone = 1;
+ size_t entry_size;
+ H5C2_t * cache_ptr = NULL;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ TESTING("evictions enabled/disabled functionality");
+
+ /* Setup a cache and verify that it is empty.
+ *
+ * Use H5C2_get_evictions_enabled() to determine if evictions are
+ * currently enabled -- they should be.
+ *
+ * Load entries until the cache is full. Load one more. Verify that
+ * this caused an entry to be evicted.
+ *
+ * Insert an entry. Verify that this cases and entry to be evicted.
+ *
+ * Used H5C2_set_evictions_enabled() to disable evictions. Verify
+ * with a call to H5C2_get_evictions_enabled().
+ *
+ * Load another entry -- verify that this does not cause an entry
+ * to be evicted.
+ *
+ * Insert an entry -- verify that this does not cause an entry to
+ * be evicted.
+ *
+ * Use H5C2_set_evictions_enabled() to re-enable evictions. Verify
+ * with a call to H5C2_get_evictions_enabled().
+ *
+ * Protect and unprotect some of the entries in the cache. Verify
+ * that there are no evictions (since we only try to make space
+ * when we either insert or load a new entry).
+ *
+ * Protect an entry not in the cache. Verify that this causes
+ * two evictions.
+ *
+ * Used H5C2_set_evictions_enabled() to disable evictions again.
+ * Verify with a call to H5C2_get_evictions_enabled().
+ *
+ * Now flush and discard the cache -- should succeed.
+ */
+
+ pass2 = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* create the cache */
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(1 * 1024 * 1024),
+ (size_t)( 512 * 1024));
+
+ base_addr = entries2[MONSTER_ENTRY_TYPE];
+ entry_size = MONSTER_ENTRY_SIZE;
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that it is empty */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that H5C2_get_evictions_enabled() returns the expected value */
+ if ( pass2 ) {
+
+ result = H5C2_get_evictions_enabled(cache_ptr, &evictions_enabled);
+
+ if ( ( result != SUCCEED ) || ( evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected evictions enabled 1.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* fill the cache */
+ for ( i = 0; i < 16 ; i++ )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ FALSE, H5C2__NO_FLAGS_SET);
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that the cache is full */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* protect and unprotect another entry */
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 16);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 16,
+ FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that an entry has been evicted */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ entry_ptr = &(base_addr[0]);
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ NULL, &in_cache, NULL, NULL, NULL);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 1.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 1.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 1.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* insert an entry */
+ insert_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 17, TRUE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that another entry has been evicted */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ entry_ptr = &(base_addr[1]);
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ NULL, &in_cache, NULL, NULL, NULL);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 2.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 2.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 2.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 12 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* disable evictions */
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "can't disable evictions 1.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 13 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that evictions are disabled */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != FALSE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 5.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 14 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* protect and unprotect another entry */
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 18);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 18,
+ FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 15 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that no entry has been evicted */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 17 ) ||
+ ( cache_ptr->index_size != 17 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 1 ) ||
+ ( cache_ptr->slist_size != MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != FALSE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 6.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 16 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* insert another entry */
+ insert_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 19, TRUE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 17 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that no entry has been evicted */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 18 ) ||
+ ( cache_ptr->index_size != 18 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != 2 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != FALSE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 7.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 18 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* re-enable evictions */
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "can't enable evictions 1.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 19 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* protect and unprotect an entry that is in the cache */
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 19);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 19,
+ FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 20 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that no entries have been evicted */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 18 ) ||
+ ( cache_ptr->index_size != 18 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != 2 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 8.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 21 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* protect and unprotect an entry that isn't in the cache */
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 20);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 20,
+ FALSE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 22 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that the entries have been evicted to bring the
+ * cache back down to its normal size.
+ */
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != 2 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 9.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 23 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ entry_ptr = &(base_addr[2]);
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ NULL, &in_cache, NULL, NULL, NULL);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 3.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 3.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 3.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 24 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ entry_ptr = &(base_addr[3]);
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ NULL, &in_cache, NULL, NULL, NULL);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 4.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 4.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 4.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 25 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* disable evictions again */
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "can't disable evictions 2.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 26 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* protect and unprotect an entry that isn't in the cache, forcing
+ * the cache to grow.
+ */
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 21);
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 21,
+ FALSE, H5C2__NO_FLAGS_SET);
+
+
+ if ( show_progress ) /* 27 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that the cache has grown */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 17 ) ||
+ ( cache_ptr->index_size != 17 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 2 ) ||
+ ( cache_ptr->slist_size != 2 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != FALSE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 10.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 28 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* re-enable evictions again */
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "can't enable evictions 2.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 29 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* insert an entry */
+ insert_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 22, TRUE, H5C2__NO_FLAGS_SET);
+
+ if ( show_progress ) /* 30 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* verify that the cache has returned to its maximum size */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->index_len != 16 ) ||
+ ( cache_ptr->index_size != 16 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->slist_len != 3 ) ||
+ ( cache_ptr->slist_size != 3 * MONSTER_ENTRY_SIZE ) ||
+ ( cache_ptr->evictions_enabled != TRUE ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected cache status 11.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 31 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ entry_ptr = &(base_addr[4]);
+
+ result = H5C2_get_entry_status(cache_ptr, entry_ptr->addr,
+ NULL, &in_cache, NULL, NULL, NULL);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5AC_get_entry_status() reports failure 5.");
+ failure_mssg2 = msg;
+
+ } else if ( in_cache ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected status 5.");
+ failure_mssg2 = msg;
+
+ } else if ( ( ! entry_ptr->deserialized ) ||
+ ( entry_ptr->cleared ) ||
+ ( entry_ptr->serialized ) ||
+ ( ! entry_ptr->destroyed ) ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "Unexpected entry history 5.");
+ failure_mssg2 = msg;
+
+ }
+ }
+
+ if ( show_progress ) /* 32 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ /* disable evictions one last time before we shut down */
+ if ( pass2 ) {
+
+ result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128, "can't disable evictions 3.");
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( show_progress ) /* 33 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( show_progress ) /* 34 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_evictions_enabled() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_protected_err()
+ *
+ * Purpose: Verify that an attempt to flush the cache when it contains
+ * a protected entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_protected_err(void)
+{
+    const char * fcn_name = "check_flush_protected_err";
+    H5C2_t * cache_ptr = NULL;
+    herr_t rc;
+
+    TESTING("flush cache with protected entry error");
+
+    pass2 = TRUE;
+
+    /* Set up a cache and protect an entry in it.  A flush attempt must
+     * then fail.  After unprotecting the entry, a second flush attempt
+     * must succeed, at which point the cache is torn down.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024), (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        /* flushing with a protected entry resident must fail */
+        rc = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                              H5C2__NO_FLAGS_SET);
+
+        if ( rc >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "flush succeeded on cache with protected entry.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, TRUE, H5C2__NO_FLAGS_SET);
+
+            /* with the entry unprotected, the flush must succeed */
+            rc = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                  H5C2__NO_FLAGS_SET);
+
+            if ( rc < 0 ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "flush failed after unprotect.\n";
+
+            } else {
+
+                takedown_cache2(cache_ptr, FALSE, FALSE);
+            }
+        }
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_flush_protected_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_destroy_pinned_err()
+ *
+ * Purpose: Verify that an attempt to destroy the cache when it contains
+ *		a pinned entry that can't be unpinned during the flush destroy
+ * will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/7/06
+ *
+ * Modifications:
+ *
+ * JRM -- 10/15/07
+ * Minor updates to accomodate cache API mods.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_destroy_pinned_err(void)
+{
+    const char * fcn_name = "check_destroy_pinned_err()";
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("destroy cache with permanently pinned entry error");
+
+    /* pass2 / failure_mssg2 are the test framework's global status flags */
+    pass2 = TRUE;
+
+    /* allocate a cache, pin an entry, and try to flush destroy.  This
+     * should fail.  Unpin the entry and flush destroy again -- should
+     * succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        /* pin entry (0, 0) via a protect / unprotect-with-pin-flag pair */
+        protect_entry2(cache_ptr, 0, 0);
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+        /* destroying the cache while it holds a pinned entry must fail */
+        if ( H5C2_dest(cache_ptr, H5P_DATASET_XFER_DEFAULT) >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "destroy succeeded on cache with pinned entry.\n";
+
+        } else {
+
+            unpin_entry2(cache_ptr, 0, 0);
+
+            /* with the entry unpinned, the destroy must succeed */
+            if ( H5C2_dest(cache_ptr, H5P_DATASET_XFER_DEFAULT) < 0 ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "destroy failed after unpin.\n";
+
+            }
+        }
+
+        /* call takedown_cache2() with a NULL cache_ptr parameter.
+         * This causes the function to close and delete the file,
+         * while skipping the call to H5C2_dest().
+         */
+        takedown_cache2(NULL, FALSE, FALSE);
+
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_destroy_pinned_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_destroy_protected_err()
+ *
+ * Purpose: Verify that an attempt to destroy the cache when it contains
+ * a protected entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_destroy_protected_err(void)
+{
+    const char * fcn_name = "check_destroy_protected_err";
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("destroy cache with protected entry error");
+
+    /* pass2 / failure_mssg2 are the test framework's global status flags */
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, and try to flush.  This
+     * should fail.  Unprotect the entry and flush again -- should
+     * succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        /* destroying the cache while an entry is protected must fail */
+        if ( H5C2_dest(cache_ptr, H5P_DATASET_XFER_DEFAULT) >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "destroy succeeded on cache with protected entry.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, TRUE, H5C2__NO_FLAGS_SET);
+
+            /* with the entry unprotected, the destroy must succeed */
+            if ( H5C2_dest(cache_ptr, H5P_DATASET_XFER_DEFAULT) < 0 ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "destroy failed after unprotect.\n";
+
+            }
+        }
+
+        /* call takedown_cache2() with a NULL cache_ptr parameter.
+         * This causes the function to close and delete the file,
+         * while skipping the call to H5C2_dest().
+         */
+        takedown_cache2(NULL, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_destroy_protected_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_duplicate_insert_err()
+ *
+ * Purpose:	Verify that an attempt to insert an entry that is
+ *		already in the cache will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_duplicate_insert_err(void)
+{
+    const char * fcn_name = "check_duplicate_insert_err";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * base_addr;
+    test_entry_t * entry_ptr;
+
+    TESTING("duplicate entry insertion error");
+
+    /* pass2 / failure_mssg2 are the test framework's global status flags */
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, and then try to insert
+     * the entry again.  This should fail.  Unprotect the entry and
+     * destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        /* protecting the entry places it in the cache */
+        protect_entry2(cache_ptr, 0, 0);
+
+        if ( pass2 ) {
+
+            base_addr = entries2[0];
+            entry_ptr = &(base_addr[0]);
+
+            /* attempt to insert the already-resident entry -- the cache
+             * must reject it.
+             */
+            result = H5C2_insert_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                       &(types2[0]), entry_ptr->addr,
+                                       entry_ptr->size,
+                                       (void *)entry_ptr, H5C2__NO_FLAGS_SET);
+
+            if ( result >= 0 ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "insert of duplicate entry succeeded.\n";
+
+            } else {
+
+                unprotect_entry2(cache_ptr, 0, 0, TRUE, H5C2__NO_FLAGS_SET);
+
+                takedown_cache2(cache_ptr, FALSE, FALSE);
+            }
+        }
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s(): failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_duplicate_insert_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_rename_err()
+ *
+ * Purpose: Verify that an attempt to rename an entry to the address
+ * of an existing entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_rename_err(void)
+{
+    const char * fcn_name = "check_rename_err()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_0_0_ptr;
+    test_entry_t * entry_0_1_ptr;
+    test_entry_t * entry_1_0_ptr;
+
+    TESTING("rename to existing entry errors");
+
+    /* pass2 / failure_mssg2 are the test framework's global status flags */
+    pass2 = TRUE;
+
+    /* allocate a cache, and insert several entries.  Try to rename
+     * entries to other entries resident in the cache.  This should
+     * fail.  Destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        /* two entries of type 0 and one of type 1 */
+        insert_entry2(cache_ptr, 0, 0, TRUE, H5C2__NO_FLAGS_SET);
+        insert_entry2(cache_ptr, 0, 1, TRUE, H5C2__NO_FLAGS_SET);
+        insert_entry2(cache_ptr, 1, 0, TRUE, H5C2__NO_FLAGS_SET);
+
+        entry_0_0_ptr = &((entries2[0])[0]);
+        entry_0_1_ptr = &((entries2[0])[1]);
+        entry_1_0_ptr = &((entries2[1])[0]);
+    }
+
+    /* rename onto the address of a resident entry of the same type */
+    if ( pass2 ) {
+
+        result = H5C2_rename_entry(cache_ptr, &(types2[0]),
+                                   entry_0_0_ptr->addr, entry_0_1_ptr->addr);
+
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "rename to addr of same type succeeded.\n";
+        }
+    }
+
+    /* rename onto the address of a resident entry of a different type */
+    if ( pass2 ) {
+
+        result = H5C2_rename_entry(cache_ptr, &(types2[0]),
+                                   entry_0_0_ptr->addr, entry_1_0_ptr->addr);
+
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "rename to addr of different type succeeded.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_rename_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_pin_err()
+ *
+ * Purpose: Verify that an attempt to pin an entry that is already
+ * pinned will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/24/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_pin_err(void)
+{
+    const char * fcn_name = "check_double_pin_err()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("pin a pinned entry error");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, unprotect it with the pin flag,
+     * protect it again, and then try to unprotect it again with the pin
+     * flag.  This should fail.  Unpin the entry and destroy the cache
+     * -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                &(types2[0]), entry_ptr->addr,
+                                (void *)entry_ptr, H5C2__PIN_ENTRY_FLAG,
+                                (size_t)0);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to pin a pinned entry succeeded.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__UNPIN_ENTRY_FLAG);
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_double_pin_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_unpin_err()
+ *
+ * Purpose: Verify that an attempt to unpin an unpinned entry will
+ * generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/24/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_unpin_err(void)
+{
+    const char * fcn_name = "check_double_unpin_err()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("unpin an unpinned entry error");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, unprotect it with the unpin flag.
+     * -- This should fail.
+     *
+     * Try again with H5C2_unpin_entry -- this should also fail.
+     *
+     * Destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                &(types2[0]), entry_ptr->addr,
+                                (void *)entry_ptr, H5C2__UNPIN_ENTRY_FLAG,
+                                (size_t)0);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to unpin an unpinned entry succeeded 1.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unpin_entry(cache_ptr, (void *)entry_ptr);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to unpin an unpinned entry succeeded 2.\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_double_unpin_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_pin_entry_errs()
+ *
+ * Purpose: Verify that invalid calls to H5C2_pin_protected_entry()
+ * generate errors as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/24/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_pin_entry_errs(void)
+{
+    const char * fcn_name = "check_pin_entry_errs()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("pin entry related errors");
+
+    pass2 = TRUE;
+
+    /* Allocate a cache, protect an entry, unprotect it with no flags,
+     * and then call H5C2_pin_protected_entry() to pin it -- This should fail.
+     *
+     * Protect the entry again, unprotect it with a pin flag, protect it
+     * again, and then call H5C2_pin_protected_entry() to pin it -- This
+     * should fail also.
+     *
+     * Unprotect the entry with the unpin flag.
+     *
+     * Destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_pin_protected_entry(cache_ptr, (void *)entry_ptr);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to pin an unprotected entry succeeded.\n";
+
+        } else {
+
+            protect_entry2(cache_ptr, 0, 0);
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+            protect_entry2(cache_ptr, 0, 0);
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_pin_protected_entry(cache_ptr, (void *)entry_ptr);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to pin a pinned, protected entry succeeded.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__UNPIN_ENTRY_FLAG);
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_pin_entry_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_protect_err()
+ *
+ * Purpose: Verify that an attempt to protect an entry that is already
+ * protected will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ * - Modified call to H5C2_protect() to pass H5C2__NO_FLAGS_SET in the
+ * the new flags parameter.
+ *
+ * JRM -- 3/28/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_protect_err(void)
+{
+    const char * fcn_name = "check_double_protect_err()";
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+    H5C2_cache_entry_t * dup_entry_ptr;
+
+    TESTING("protect a protected entry error");
+
+    pass2 = TRUE;
+
+    /* Set up a cache and protect an entry; a second attempt to protect
+     * the same entry must then fail.  Clean up by unprotecting the
+     * entry and destroying the cache.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024), (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        /* H5C2_protect() signals failure by returning NULL */
+        dup_entry_ptr = H5C2_protect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                     &(types2[0]), entry_ptr->addr,
+                                     entry_ptr->size, NULL,
+                                     H5C2__NO_FLAGS_SET);
+
+        if ( dup_entry_ptr != NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "attempt to protect a protected entry succeeded.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_double_protect_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_unprotect_err()
+ *
+ * Purpose: Verify that an attempt to unprotect an entry that is already
+ * unprotected will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ * JRM -- 6/17/05
+ * Modified function to use the new dirtied parameter in
+ * H5C2_unprotect().
+ *
+ * JRM -- 9/8/05
+ * Updated function for the new size change parameter in
+ * H5C2_unprotect(). We don't use them for now.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_unprotect_err(void)
+{
+    const char * fcn_name = "check_double_unprotect_err()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("unprotect an unprotected entry error");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, unprotect it, and then try to
+     * unprotect the entry again.  This should fail.  Destroy the cache
+     * -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                &(types2[0]), entry_ptr->addr,
+                                (void *)entry_ptr, H5C2__NO_FLAGS_SET,
+                                (size_t)0);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to unprotect an unprotected entry succeeded 1.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_double_unprotect_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_mark_entry_dirty_errs()
+ *
+ * Purpose: Verify that:
+ *
+ *		1) a call to H5C2_mark_pinned_entry_dirty with an unpinned
+ * entry as the target will generate an error.
+ *
+ * 2) a call to H5C2_mark_pinned_entry_dirty with a protected
+ * entry as the target will generate an error.
+ *
+ * 3) a call to H5C2_mark_pinned_or_protected_entry_dirty with
+ *		3) a call to H5C2_mark_pinned_or_protected_entry_dirty with
+ *		   an unpinned and unprotected entry will generate an
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 5/17/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_mark_entry_dirty_errs(void)
+{
+    const char * fcn_name = "check_mark_entry_dirty_errs()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("mark entry dirty related errors");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry, and then attempt to mark it dirty
+     * with the H5C2_mark_pinned_entry_dirty() call -- This should fail.
+     *
+     * Then unprotect the entry without pinning it, and try to mark it dirty
+     * again -- this should fail too.
+     *
+     * Try it again using H5C2_mark_pinned_or_protected_entry_dirty -- this
+     * should fail as well.
+     *
+     * Destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+        protect_entry2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_mark_pinned_entry_dirty(cache_ptr, (void *)entry_ptr,
+                                              FALSE, (size_t)0);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt dirty a pinned and protected entry succeeded.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__UNPIN_ENTRY_FLAG);
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_mark_pinned_entry_dirty(cache_ptr, (void *)entry_ptr,
+                                              FALSE, (size_t)0);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to dirty a unpinned and unprotected entry succeeded 1.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_mark_pinned_or_protected_entry_dirty(cache_ptr,
+                                                           (void *)entry_ptr);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to dirty a unpinned and unprotected entry succeeded 2.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_mark_entry_dirty_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_expunge_entry_errs()
+ *
+ * Purpose: Verify that invalid calls to H5C2_expunge_entry()
+ * generate errors as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/6/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_expunge_entry_errs(void)
+{
+    const char * fcn_name = "check_expunge_entry_errs()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("expunge entry related errors");
+
+    pass2 = TRUE;
+
+    /* Allocate a cache, protect an entry, and then call H5C2_expunge_entry()
+     * to expunge it -- this should fail
+     *
+     * Unprotect the entry with the pinned flag, and then call
+     * H5C2_expunge_entry() again.  This should fail too.
+     *
+     * Finally, unpin the entry and call H5C2_expunge_entry() yet again.
+     * This should succeed.
+     *
+     * Destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        entry_ptr = &((entries2[0])[0]);
+
+        protect_entry2(cache_ptr, 0, 0);
+
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_expunge_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                    &(types2[0]), entry_ptr->addr);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to expunge a protected entry succeeded.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_expunge_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                    &(types2[0]), entry_ptr->addr);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to expunge a pinned entry succeeded.\n";
+
+        } else {
+
+            unpin_entry2(cache_ptr, 0, 0);
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        /* the entry is now unpinned and unprotected -- expunge must work */
+        result = H5C2_expunge_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                    &(types2[0]), entry_ptr->addr);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to expunge an unpinned and unprotected entry failed.\n";
+
+        }
+    }
+
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_expunge_entry_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_resize_entry_errs()
+ *
+ * Purpose: Verify that invalid calls to H5C2_resize_pinned_entry()
+ * generates errors as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/7/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_resize_entry_errs(void)
+{
+    const char * fcn_name = "check_resize_entry_errs()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("resize entry related errors");
+
+    pass2 = TRUE;
+
+    /* Allocate a cache, protect an entry, and then call
+     * H5C2_resize_pinned_entry() to resize it -- this should fail.
+     *
+     * Unprotect the entry with the pinned flag, and then call
+     * H5C2_resize_pinned_entry() again with new size of zero.
+     * This should fail too.
+     *
+     * Finally, unpin the entry and destroy the cache.
+     * This should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        entry_ptr = &((entries2[0])[0]);
+
+        protect_entry2(cache_ptr, 0, 0);
+
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr, (size_t)1);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 --
+         * the original ( result > 0 ) test could never detect an
+         * erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "Call to H5C2_resize_pinned_entry on a protected entry succeeded.\n";
+
+        } else {
+
+            unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__PIN_ENTRY_FLAG);
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_resize_pinned_entry(cache_ptr, (void *)entry_ptr,
+                                          (size_t)0);
+
+        /* as above, >= 0 (not > 0) detects an erroneous success */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "Call to H5C2_resize_pinned_entry with 0 new size succeeded.\n";
+
+        } else {
+
+            unpin_entry2(cache_ptr, 0, 0);
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_resize_entry_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_unprotect_ro_dirty_err()
+ *
+ * Purpose: If an entry is protected read only, verify that unprotecting
+ * it dirty will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/3/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_unprotect_ro_dirty_err(void)
+{
+    const char * fcn_name = "check_unprotect_ro_dirty_err()";
+    herr_t result;
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+
+    TESTING("unprotect a read only entry dirty error");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, protect an entry read only, and then unprotect it
+     * with the dirtied flag set.  This should fail.  Destroy the cache
+     * -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                &(types2[0]), entry_ptr->addr,
+                                (void *)entry_ptr, H5C2__DIRTIED_FLAG,
+                                (size_t)0);
+
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to unprotect a ro entry dirty succeeded 1.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    /* allocate a another cache, protect an entry read only twice, and
+     * then unprotect it with the dirtied flag set.  This should fail.
+     * Unprotect it with no flags set twice and then destroy the cache.
+     * This should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+
+        protect_entry_ro2(cache_ptr, 0, 0);
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                &(types2[0]), entry_ptr->addr,
+                                (void *)entry_ptr, H5C2__DIRTIED_FLAG,
+                                (size_t)0);
+
+        /* herr_t success is SUCCEED (0), so test for result >= 0 as in
+         * the first scenario above -- the original ( result > 0 ) test
+         * could never detect an erroneous success.
+         */
+        if ( result >= 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "attempt to unprotect a ro entry dirty succeeded 2.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        /* release both read-only protects before tearing down */
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_unprotect_ro_dirty_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_protect_ro_rw_err()
+ *
+ * Purpose: If an entry is protected read only, verify that protecting
+ * it rw will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/9/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_protect_ro_rw_err(void)
+{
+    const char * fcn_name = "check_protect_ro_rw_err()";
+    H5C2_t * cache_ptr = NULL;
+    test_entry_t * entry_ptr;
+    void * rw_thing_ptr = NULL;
+
+    TESTING("protect a read only entry rw error");
+
+    pass2 = TRUE;
+
+    /* Set up a cache and protect an entry read only.  A subsequent
+     * attempt to protect the same entry read/write must fail.
+     *
+     * Clean up by unprotecting the entry and destroying the cache.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024), (size_t)(1 * 1024));
+
+        protect_entry_ro2(cache_ptr, 0, 0);
+
+        entry_ptr = &((entries2[0])[0]);
+    }
+
+    if ( pass2 ) {
+
+        /* H5C2_protect() signals failure by returning NULL */
+        rw_thing_ptr = H5C2_protect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+                                    &(types2[0]), entry_ptr->addr,
+                                    entry_ptr->size, NULL, H5C2__NO_FLAGS_SET);
+
+        if ( rw_thing_ptr != NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "attempt to protect a ro entry rw succeeded.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        unprotect_entry2(cache_ptr, 0, 0, FALSE, H5C2__NO_FLAGS_SET);
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_protect_ro_rw_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	check_check_evictions_enabled_err()
+ *
+ * Purpose: Verify that H5C2_get_evictions_enabled() and
+ * H5C2_set_evictions_enabled() generate errors as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/3/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_check_evictions_enabled_err(void)
+{
+    const char * fcn_name = "check_evictions_enabled_err()";
+    herr_t result;
+    hbool_t evictions_enabled;
+    H5C2_t * cache_ptr = NULL;
+
+    TESTING("get/set evictions enabled errors");
+
+    pass2 = TRUE;
+
+    /* allocate a cache.
+     *
+     * Call H5C2_get_evictions_enabled(), passing it a NULL cache_ptr,
+     * should fail.
+     *
+     * Repeat with a NULL evictions_enabled_ptr, should fail as well.
+     *
+     * Configure the cache to use auto cache resize.  Call
+     * H5C2_set_evictions_enabled() to disable evictions.  Should fail.
+     *
+     * Unprotect the entry and destroy the cache -- should succeed.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+    }
+
+    /* NULL cache_ptr -- must fail */
+    if ( pass2 ) {
+
+        result = H5C2_get_evictions_enabled(NULL, &evictions_enabled);
+
+        if ( result == SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_evictions_enabled succeeded() 1.\n";
+        }
+    }
+
+    /* NULL evictions_enabled_ptr -- must fail */
+    if ( pass2 ) {
+
+        result = H5C2_get_evictions_enabled(cache_ptr, NULL);
+
+        if ( result == SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_evictions_enabled succeeded() 2.\n";
+        }
+    }
+
+    /* enabling evictions is always legal -- must succeed */
+    if ( pass2 ) {
+
+        result = H5C2_set_evictions_enabled(cache_ptr, TRUE);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_evictions_enabled failed().\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        (cache_ptr->resize_ctl).incr_mode = H5C2_incr__threshold;
+
+        /* bug fix: the original called H5C2_get_evictions_enabled()
+         * with FALSE where, per the comment above and the failure
+         * messages, H5C2_set_evictions_enabled() was intended.
+         */
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result == SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_evictions_enabled succeeded() 1.\n";
+
+        } else if ( cache_ptr->evictions_enabled != TRUE ) {
+
+            /* the failed call must leave evictions enabled */
+            pass2 = FALSE;
+            failure_mssg2 = "evictions disabled by failed set call 1.\n";
+        }
+
+        (cache_ptr->resize_ctl).incr_mode = H5C2_incr__off;
+    }
+
+    if ( pass2 ) {
+
+        (cache_ptr->resize_ctl).decr_mode = H5C2_decr__threshold;
+
+        /* bug fix: get -> set, as above */
+        result = H5C2_set_evictions_enabled(cache_ptr, FALSE);
+
+        if ( result == SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_evictions_enabled succeeded() 2.\n";
+        }
+
+        (cache_ptr->resize_ctl).decr_mode = H5C2_decr__off;
+    }
+
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_check_evictions_enabled_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_auto_cache_resize()
+ *
+ * Purpose: Exercise the automatic cache resizing functionality.
+ * The objective is to operate the auto-resize code in
+ * all possible modes. Unfortunately, there are quite
+ * a few of them.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/29/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Flag and status recorded by test_rpt_fcn() so that the auto-resize
+ * tests can verify both that the resize report callback was invoked
+ * during an epoch and which resize action the cache reported.
+ */
+hbool_t rpt_fcn_called = FALSE;
+enum H5C2_resize_status rpt_status;
+
+/* Auto-resize report callback installed via the rpt_fcn field of
+ * H5C2_auto_size_ctl_t.  It only records that it was called and the
+ * reported status; all other arguments are deliberately ignored.
+ */
+static void test_rpt_fcn(UNUSED H5C2_t * cache_ptr,
+                         UNUSED int32_t version,
+                         UNUSED double hit_rate,
+                         UNUSED enum H5C2_resize_status status,
+                         UNUSED size_t old_max_cache_size,
+                         UNUSED size_t new_max_cache_size,
+                         UNUSED size_t old_min_clean_size,
+                         UNUSED size_t new_min_clean_size)
+{
+    rpt_fcn_called = TRUE;   /* note that the callback fired this epoch */
+    rpt_status = status;     /* remember the reported resize action */
+}
+
+static void
+check_auto_cache_resize(void)
+{
+ const char * fcn_name = "check_auto_cache_resize()";
+ hbool_t show_progress = FALSE;
+ herr_t result;
+ int32_t i;
+ int32_t checkpoint = 0;
+ H5C2_t * cache_ptr = NULL;
+ H5C2_auto_size_ctl_t auto_size_ctl =
+ {
+ /* int32_t version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* H5C2_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn,
+
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (512 * 1024),
+
+ /* double min_clean_fraction = */ 0.5,
+
+ /* size_t max_size = */ (14 * 1024 * 1024),
+ /* size_t min_size = */ (512 * 1024),
+
+ /* int64_t epoch_length = */ 1000,
+
+
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+ /* double lower_hr_threshold = */ 0.75,
+
+ /* double increment = */ 2.0,
+
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+
+
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+ /* double upper_hr_threshold = */ 0.995,
+
+ /* double decrement = */ 0.1,
+
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+
+ /* int32_t epochs_before_eviction = */ 3,
+
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.05
+ };
+
+ TESTING("automatic cache resizing");
+
+ pass2 = TRUE;
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* allocate a cache, enable automatic cache resizing, and then force
+ * the cache through all its operational modes. Verify that all
+ * performs as expected.
+ */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after initialization.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache not full -- should result in not
+ * full status.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, PICO_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, PICO_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 1.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- should result in increase
+ * of cache size from .5 to 1 meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 2.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache not full -- should result in not
+ * full status.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, PICO_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, PICO_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 3.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full again -- should result in increase
+ * of cache size from 1 to 2 meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 4.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full again -- should result in increase
+ * of cache size from 2 to 4 meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 5.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full again -- should result in increase
+ * of cache size from 4 to 8 meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 6.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full again -- should result in increase
+ * of cache size from 8 to 12 meg. Note that max increase reduced the
+ * size of the increase.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (12 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (6 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 7.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full again -- should result in increase
+ * of cache size from 12 to 14 meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (14 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (7 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 8.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full and at maximum size -- should
+     * result in no change in size and a result of at_max_size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (14 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (7 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 9.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate with cache full and at maximum size -- should
+ * result in a decrease from 14 to 13 Meg -- note that max decrease
+ * reduced the size of the reduction
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (13 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (6 * 1024 * 1024 + 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 10.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* the current cache configuration is inconvenient for testing cache
+     * size reduction, so let's change it to something easier to work
+ * with.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1000 * 1000 + 10;
+
+ auto_size_ctl.min_clean_fraction = 0.1;
+
+ auto_size_ctl.max_size = 8 * 1000 * 1000;
+ auto_size_ctl.min_size = 500 * 1000;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1000 * 1000);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1000 * 1000);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1000 * 1000 + 10) ) ||
+ ( cache_ptr->min_clean_size != (400 * 1000 + 1) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 1.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should result in a decrease from ~4 to ~3
+ * M -- note that max decrease reduces the size of the reduction
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (3 * 1000 * 1000 + 10) ) ||
+ ( cache_ptr->min_clean_size != (300 * 1000 + 1) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 11.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should result in a decrease from ~3
+ * to ~2 M -- again note that max decrease reduces the size of the
+ * reduction.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1000 * 1000 + 10) ) ||
+ ( cache_ptr->min_clean_size != (200 * 1000 + 1) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 12.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should result in a decrease from ~2
+ * to ~1 M -- again note that max decrease reduces the size of the
+     * reduction, but only by five bytes.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1 * 1000 * 1000 + 10) ) ||
+ ( cache_ptr->min_clean_size != (100 * 1000 + 1) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 13.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should result in a decrease from ~1
+ * to ~0.5 M -- max decrease is no longer a factor. New size is five
+ * bytes above the minimum.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (500 * 1000 + 5) ) ||
+ ( cache_ptr->min_clean_size != (50 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 14.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should result in a decrease of five
+ * bytes to the minimum cache size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (500 * 1000) ) ||
+ ( cache_ptr->min_clean_size != (50 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 15.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- Already at minimum size so no change in
+ * cache size and result should be at_min_size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_min_size ) ||
+ ( cache_ptr->max_cache_size != (500 * 1000) ) ||
+ ( cache_ptr->min_clean_size != (50 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 16.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force in range hit rate -- should be no change in cache size,
+ * and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 900 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i + 1000);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i + 1000,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (500 * 1000) ) ||
+ ( cache_ptr->min_clean_size != (50 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 17.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- should
+ * increase cache size from .5 to 1 M.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (1 * 1000 * 1000) ) ||
+ ( cache_ptr->min_clean_size != (100 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 18.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should result in a decrease to the
+ * minimum cache size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (500 * 1000) ) ||
+ ( cache_ptr->min_clean_size != (50 * 1000) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 19.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /******************************************************************
+ * now do some tests with the maximum increase and decrease sizes
+ * disabled.
+ ******************************************************************/
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 4.0;
+
+ auto_size_ctl.apply_max_increment = FALSE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.25;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 3.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 2.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should result in a decrease to the
+ * minimum cache size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 20.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- should increase cache size
+ * from 1 to 4 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 21.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate again with cache full -- should increase cache
+ * size from 4 to 16 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (16 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != ( 8 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 22.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should result in a decrease cache size from
+ * 16 to 4 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 23.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /******************************************************************
+ * We have tested the threshold increment and decrement modes.
+ * must now test the ageout decrement mode.
+ *
+ * Reconfigure the cache for this testing.
+ ******************************************************************/
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 8 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 8 * 1024 * 1024;
+ auto_size_ctl.min_size = 512 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = FALSE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 4.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 3.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* fill the cache with 1024 byte entries -- nothing should happen
+ * for three epochs while the markers are inserted into the cache
+ *
+ * Note that hit rate will be zero, so the cache will attempt to
+ * increase its size. Since we are already at max size, it will
+ * not be able to.
+ */
+ if ( pass2 ) { /* first epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 24.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* second epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 25.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* third epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 26.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourth epoch -- If the hit rate were above the lower threshold,
+ * we would see cache size reduction now. However, nothing will
+ * happen until we get the hit rate above the lower threshold.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 27.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifth epoch -- force the hit rate to 100%. We should see cache size
+ * reduction now.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2001 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2001 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 28.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixth epoch -- force the hit rate to 100% again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1001 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(1001 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 29.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* seventh epoch -- force the hit rate to 100% again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 30.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eighth epoch -- force the hit rate to 100% again -- should be steady
+ * state.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 31.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "*check point %d\n", checkpoint++);
+
+ /* now just bang on one entry -- after three epochs, this should
+ * get all entries other than the one evicted, and the cache size
+ * should be decreased to the minimum.
+ */
+ if ( pass2 ) { /* ninth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 32.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* tenth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 33.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* eleventh epoch -- cache size reduction */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 34.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* twelfth epoch -- at minimum size so no more ageouts */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_min_size ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 35.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* repeat the above test, but with max_decrement enabled to see
+ * if that features works as it should. Note that this will change
+ * the structure of the test a bit.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 8 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 8 * 1024 * 1024;
+ auto_size_ctl.min_size = 512 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = FALSE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 5.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 4.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* fill the cache with 1024 byte entries -- nothing should happen
+ * for three epochs while the markers are inserted into the cache
+ *
+ * Note that hit rate will be zero, so the cache will attempt to
+ * increase its size. Since we are already at max size, it will
+ * not be able to.
+ */
+ if ( pass2 ) { /* first epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 36.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* second epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 37.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* third epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 38.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourth epoch -- If the hit rate were above the lower threshold,
+ * we would see cache size reduction now. However, nothing will
+ * happen until we get the hit rate above the lower threshold.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 39.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifth epoch -- force the hit rate to 100%. We should see cache size
+ * reduction now.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (7 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (7 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 40.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixth epoch -- force the hit rate to 100% again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (6 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 41.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* seventh epoch -- keep hit rate at 100%, and keep 2K entries active.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (5 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (5 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 42.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eighth epoch -- still 100% hit rate
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 43.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* ninth epoch --hit rate at 100%.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (3 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 44.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* tenth epoch -- still 100% hit rate
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 512 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 45.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eleventh epoch -- hit rate at 100% -- starting to stabilize
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 46.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* twelfth epoch -- force the hit rate to 100% again -- should be steady
+ * state.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 47.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* now just bang on one entry -- after three epochs, this should
+ * get all entries other than the one evicted, and the cache size
+ * should be decreased to the minimum.
+ */
+ if ( pass2 ) { /* thirteenth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 48.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* fourteenth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size !=
+ (1001 * 1024 + MONSTER_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size !=
+ (1001 * 512 + MONSTER_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 49.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* fifteenth epoch -- cache size reduction */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 50.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* sixteenth epoch -- at minimum size so no more ageouts */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_min_size ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 51.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* repeat the test yet again, this time with empty reserve enabled.
+ * Again, some structural changes in the test are necessary.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 8 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 8 * 1024 * 1024;
+ auto_size_ctl.min_size = 512 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.5; /* for ease of testing */
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 6.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 5.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* fill the cache with 1024 byte entries -- nothing should happen
+ * for three epochs while the markers are inserted into the cache
+ *
+ * Note that hit rate will be zero, so the cache will attempt to
+ * increase its size. Since we are already at max size, it will
+ * not be able to.
+ */
+ if ( pass2 ) { /* first epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 52.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* second epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 53.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* third epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 2000;
+ while ( ( pass2 ) && ( i < 3000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 54.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourth epoch -- If the hit rate were above the lower threshold,
+ * we would see cache size reduction now. However, nothing will
+ * happen until we get the hit rate above the lower threshold.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 55.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifth epoch -- force the hit rate to 100%. We should see cache size
+ * reduction now.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (4002 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(4002 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 56.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixth epoch -- force the hit rate to 100% again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2002 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2002 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 57.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* seventh epoch -- force the hit rate to 100% again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 58.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eighth epoch -- force the hit rate to 100% again -- should be steady
+ * state.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 3000;
+ while ( ( pass2 ) && ( i < 4000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 59.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* now just bang on one entry -- after three epochs, this should
+ * get all entries other than the one evicted, and the cache size
+ * should be decreased to the minimum.
+ */
+ if ( pass2 ) { /* ninth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 60.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* tenth epoch */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2000 * 512) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 61.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* eleventh epoch -- cache size reduction */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 62.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* twelfth epoch -- at minimum size so no more ageouts */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_min_size ) ||
+ ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+ ( cache_ptr->index_len != 2 ) ||
+ ( cache_ptr->index_size !=
+ MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 63.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* Repeat the test again, this time using the age out with threshold
+ * mode. To simplify the testing, set epochs to eviction to 1.
+ *
+ * Again, there are some minor structural changes in the test.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 8 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 8 * 1024 * 1024;
+ auto_size_ctl.min_size = 512 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__off;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out_with_threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.999; /* for ease of testing */
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 1; /* for ease of testing */
+
+ auto_size_ctl.apply_empty_reserve = FALSE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 7.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 6.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* fill the cache with 4K byte entries -- increment mode is off,
+ * so cache size reduction should kick in as soon as we get the
+ * hit rate above .999.
+ */
+ if ( pass2 ) { /* first epoch -- hit rate 0 */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 64.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* second epoch -- hit rate 0 */
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 65.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* third epoch -- hit rate 1.0 -- should see decrease */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 66.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourth epoch -- load up the cache again -- hit rate 0 */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 67.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifth epoch -- still loading up the cache -- hit rate 0 */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 68.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixth epoch -- force hit rate to .998 -- should be no reduction */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1002;
+ while ( ( pass2 ) && ( i < 2002 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 69.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* seventh epoch -- force hit rate to .999 -- should see reduction
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1003;
+ while ( ( pass2 ) && ( i < 2003 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (1000 * MEDIUM_ENTRY_SIZE) ) ||
+ ( cache_ptr->min_clean_size != (1000 * MEDIUM_ENTRY_SIZE / 2) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 70.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /* We have now tested all the major ageout modes individually.
+ * Lets try them all together to look for unexpected interactions
+ * and/or bugs.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 8 * 1000 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 8 * 1000 * 1024;
+ auto_size_ctl.min_size = 512 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out_with_threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.999; /* for ease of testing */
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1000 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 1; /* for ease of testing */
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.5; /* for ease of testing */
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 8.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 7.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fill the cache with 4K byte entries -- increment mode is threshold,
+ * so the decrease code will not be executed until the hit rate exceeds
+ * .75.
+ */
+ if ( pass2 ) { /* first epoch -- hit rate 0 */
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 71.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { /* second epoch -- hit rate 0 */
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 72.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* third epoch -- force the hit rate to 1.0. Should be no change
+ * in the cache size due to the combination of the empty reserve
+ * and the max decrease. Max decrease will limit the evictions
+ * in any one epoch, and the empty reserve will not permit cache
+ * size reduction unless the specified empty reserve is maintained.
+ *
+ * In this epoch, all we should see is a reduction in the index size.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (7 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 73.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourth epoch -- hit rate still 1.0. Index size should decrease,
+ * but otherwise no change expected.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (6 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 74.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifth epoch -- hit rate still 1.0. Index size should decrease,
+ * but otherwise no change expected.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (5 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 75.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixth epoch -- hit rate still 1.0. Index size should decrease,
+ * but otherwise no change expected. Note that the cache size is
+ * now just on the edge of meeting the clean reserve.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 76.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* seventh epoch -- hit rate still 1.0. No change in index size expected.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, LARGE_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 77.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eighth epoch -- start loading 1 KB entries. Hit rate 0 so
+ * decrease code shouldn't be called.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != at_max_size ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (5 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 78.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* ninth epoch -- access the 1 KB entries again, driving the hit rate
+ * to 1.0. Decrease code should be triggered, but the max decrease
+ * should prevent the empty reserve from being met in this epoch.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 79.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* tenth epoch -- access the 1 KB entries yet again, forcing hit rate
+ * to 1.0. Decrease code should be triggered, and the empty reserve
+ * should finally be met.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (7 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (7 * 1000 * 1024 / 2) ) ||
+ ( cache_ptr->index_size != (3 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 80.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* eleventh epoch -- access the 1 KB entries yet again, forcing hit rate
+ * to 1.0. Decrease code should be triggered, and the empty reserve
+ * should be met again.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (6 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (2 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 81.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* twelfth epoch -- hit rate 1.0 -- decrease as before.
+     */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (5 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (5 * 1000 * 1024 / 2) ) ||
+ ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 82.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* thirteenth epoch -- hit rate 1.0 -- decrease as before.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (4 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 83.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fourteenth epoch -- hit rate 1.0 -- decrease as before.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (3 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1000 * 1024 / 2) ) ||
+ ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 84.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* fifteenth epoch -- hit rate 1.0 -- decrease as before.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 85.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* sixteenth epoch -- hit rate 1.0 -- should be stable now
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2 * 1000 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1000 * 1024) ) ||
+ ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 86.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_auto_cache_resize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_auto_cache_resize_disable()
+ *
+ * Purpose: Test the various ways in which the resize code can
+ * be disabled. Unfortunately, there are quite a few of them.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 12/16/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_auto_cache_resize_disable(void)
+{
+ const char * fcn_name = "check_auto_cache_resize_disable()";
+ hbool_t show_progress = FALSE;
+ herr_t result;
+ int32_t i;
+ int32_t checkpoint = 0;
+ H5C2_t * cache_ptr = NULL;
+ H5C2_auto_size_ctl_t auto_size_ctl =
+ {
+ /* int32_t version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* H5C2_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn,
+
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (512 * 1024),
+
+ /* double min_clean_fraction = */ 0.5,
+
+ /* size_t max_size = */ (14 * 1024 * 1024),
+ /* size_t min_size = */ (512 * 1024),
+
+ /* int64_t epoch_length = */ 1000,
+
+
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+ /* double lower_hr_threshold = */ 0.75,
+
+ /* double increment = */ 2.0,
+
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+
+
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+ /* double upper_hr_threshold = */ 0.995,
+
+ /* double decrement = */ 0.1,
+
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+
+ /* int32_t epochs_before_eviction = */ 3,
+
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.05
+ };
+
+ TESTING("automatic cache resize disable");
+
+ pass2 = TRUE;
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* allocate a cache, enable automatic cache resizing, and then force
+ * the cache through all its operational modes. Verify that all
+ * performs as expected.
+ */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after initialization.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /******************************************************************
+ * So far, we have forced the auto cache resize through all modes
+ * other than increase_disabled and decrease_disabled. Force these
+ * modes now. Note that there are several ways we can reach these
+ * modes.
+ ******************************************************************/
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 1.0; /* disable size increases */
+
+ auto_size_ctl.apply_max_increment = FALSE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 1.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- increase disabled so should
+ * be no change in cache size, and result should be increase_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != increase_disabled ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 1.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- make sure that we haven't disabled decreases.
+ * should result in a decrease cache size from 4 to 2 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 2.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate again -- increase disabled so should
+ * be no change in cache size, and result should be increase_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != increase_disabled ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 3.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Repeat the above tests, disabling increase through the lower
+ * threshold instead of the increment.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increases */
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = FALSE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 3.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 2.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- increase disabled so should
+ * be no change in cache size, and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 4.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- make sure that we haven't disabled decreases.
+ * should result in a decrease cache size from 4 to 2 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 5.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* force low hit rate again -- increase disabled (via a lower
+     * threshold of 0.0 that can never be crossed) so there should be
+     * no change in cache size, and result should be in_spec.
+     */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 6.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Repeat the above tests yet again, disabling increase through the
+ * incr_mode.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__off;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = FALSE;
+ auto_size_ctl.max_increment = (4 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = FALSE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 4.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 3.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate with cache full -- increase disabled so should
+ * be no change in cache size, and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 7.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- make sure that we haven't disabled decreases.
+ * should result in a decrease cache size from 4 to 2 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 8.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* force low hit rate again -- increase disabled (incr_mode is
+     * H5C2_incr__off) so there should be no change in cache size,
+     * and result should be in_spec.
+     */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 9.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now, disable size decreases, and repeat the above tests.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 1.0; /* disable size decreases */
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 5.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 4.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no change in cache size,
+ * and result should be decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 10.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 11.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should be no change in cache size,
+ * and result should be decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 12.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Repeat the above tests, disabling decrease through the upper
+ * threshold instead of the decrement.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decreases */
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 6.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 5.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no change in cache size,
+ * and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 13.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 14.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should be no change in cache size,
+ * and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 15.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Repeat the above tests, disabling decrease through the decr_mode.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__off;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 7.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 6.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no change in cache size,
+ * and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 16.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 17.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate again -- should be no change in cache size,
+ * and result should be in_spec.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != in_spec ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 18.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now do tests disabling size decrement in age out mode.
+ *
+ * Start by disabling size decrement by setting max_decrement to zero.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = 0; /* disable decrement */
+
+ auto_size_ctl.epochs_before_eviction = 1;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 8.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 7.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* flush the cache and destroy all entries so we start from a known point */
+ flush_cache2(cache_ptr, TRUE, FALSE, FALSE);
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* load up the cache with small entries. Note that it will take an
+ * epoch for the ageout code to initialize itself if it is enabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 19.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Load up some more small entries.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 20.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now force a high hit rate so that the size increase code is
+ * is satisfied. We would see a decrease here if decrease were
+ * possible.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 21.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 22.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* just bang on a single entry. This will see to it that there are
+ * many entries that could be aged out were decreases enabled.
+ * Should be no change in cache size, and result should be
+ * decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 23.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now disable size decrement in age out mode via the empty reserve.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 1;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 1.0; /* disable decrement */
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 9.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 8.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* flush the cache and destroy all entries so we start from a known point */
+ flush_cache2(cache_ptr, TRUE, FALSE, FALSE);
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* load up the cache with small entries. Note that it will take an
+ * epoch for the ageout code to initialize itself if it is enabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 24.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Load up some more small entries.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 25.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now force a high hit rate so that the size increase code is
+ * is satisfied. We would see a decrease here if decrease were
+ * possible.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 26.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 27.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* just bang on a single entry. This will see to it that there are
+ * many entries that could be aged out were decreases enabled.
+ * Should be no change in cache size, and result should be
+ * decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 28.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now work with age out with threshold. One can argue that we should
+ * repeat the above age out tests with age out with threshold, but the
+ * same code is executed in both cases so I don't see the point. If
+ * that ever changes, this test should be updated.
+ *
+ * There is only one way of disabling decrements that is peculiar
+ * to age out with threshold, which is to set the upper threshold
+ * to 1.0. Test this now.
+ */
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__age_out_with_threshold;
+
+ auto_size_ctl.upper_hr_threshold = 1.0;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 1;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 10.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 9.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* flush the cache and destroy all entries so we start from a known point */
+ flush_cache2(cache_ptr, TRUE, FALSE, FALSE);
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* load up the cache with small entries. Note that it will take an
+ * epoch for the ageout code to initialize itself if it is enabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 29.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Load up some more small entries.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 1000;
+ while ( ( pass2 ) && ( i < 2000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != not_full ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 30.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* Now force a high hit rate so that the size increase code is
+ * is satisfied. We would see a decrease here if decrease were
+ * possible, but the upper threshold cannot be met, so no decrease.
+ *
+ * rpt_status should be decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->index_len != 2000 ) ||
+ ( cache_ptr->index_size != 2000 * SMALL_ENTRY_SIZE ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 31.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- cache size should increase from 4 to 6 Meg.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( rpt_status != increase ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 32.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* just bang on a single entry. This keeps the hit rate high, and sees
+ * to it that there are many entries that could be aged out were
+ * decreases enabled.
+ *
+ * Should be no change in cache size, and result should be
+ * decrease_disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 999);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 999,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( ! rpt_fcn_called ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( rpt_status != decrease_disabled ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 33.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+ /*********************************************************************
+ * Finally, use the auto cache resize code to set the size of the
+ * cache and keep it there. Again, due to the complexity of the
+ * interface, there are lots of ways of doing this. We have to
+ * check them all.
+ *********************************************************************/
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 2 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increases */
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decreases */
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 11.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 10.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 34.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 35.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.25;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 1.0; /* disable size increment */
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 1.0; /* disable size decrement */
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 12.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 11.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 36.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 37.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = FALSE;
+ auto_size_ctl.initial_size = 2 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 6 * 1024 * 1024; /* no resize */
+ auto_size_ctl.min_size = 6 * 1024 * 1024; /* no resize */
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 13.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 12.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 38.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 39.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.25;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 1.0; /* disable size increment */
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decrement */
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 14.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 13.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 40.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 41.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increment */
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 1.0; /* disable size decrement */
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 15.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 14.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 42.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 43.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ auto_size_ctl.rpt_fcn = test_rpt_fcn;
+
+ auto_size_ctl.set_initial_size = TRUE;
+ auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ auto_size_ctl.min_clean_fraction = 0.5;
+
+ auto_size_ctl.max_size = 16 * 1024 * 1024;
+ auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ auto_size_ctl.epoch_length = 1000;
+
+
+ auto_size_ctl.incr_mode = H5C2_incr__off;
+
+ auto_size_ctl.lower_hr_threshold = 0.75;
+
+ auto_size_ctl.increment = 2.0;
+
+ auto_size_ctl.apply_max_increment = TRUE;
+ auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ auto_size_ctl.decr_mode = H5C2_decr__off;
+
+ auto_size_ctl.upper_hr_threshold = 0.995;
+
+ auto_size_ctl.decrement = 0.5;
+
+ auto_size_ctl.apply_max_decrement = TRUE;
+ auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ auto_size_ctl.epochs_before_eviction = 3;
+
+ auto_size_ctl.apply_empty_reserve = TRUE;
+ auto_size_ctl.empty_reserve = 0.05;
+
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 16.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after set resize re-config 15.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force low hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, i,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 44.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ /* force high hit rate -- should be no response as the auto-resize
+ * code should be disabled.
+ */
+ if ( pass2 ) {
+
+ rpt_fcn_called = FALSE;
+ i = 0;
+ while ( ( pass2 ) && ( i < 1000 ) )
+ {
+ protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+
+ if ( pass2 ) {
+ unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ }
+ i++;
+ }
+
+ if ( ( rpt_fcn_called ) ||
+ ( cache_ptr->resize_enabled ) ||
+ ( cache_ptr->size_increase_possible ) ||
+ ( cache_ptr->size_decrease_possible ) ||
+ ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected cache size change results 45.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_auto_cache_resize_disable() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_auto_cache_resize_epoch_markers()
+ *
+ * Purpose: Verify that the auto-resize code manages epoch markers
+ * correctly.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 12/16/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_auto_cache_resize_epoch_markers(void)
+{
+    const char * fcn_name = "check_auto_cache_resize_epoch_markers()";
+    hbool_t show_progress = FALSE;
+    herr_t result;
+    int32_t i;
+    int32_t j;
+    int32_t checkpoint = 0;
+    H5C2_t * cache_ptr = NULL;
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn,
+
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (512 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.5,
+
+        /* size_t      max_size               = */ (14 * 1024 * 1024),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 1000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.1,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.05
+    };
+
+    TESTING("automatic cache resize epoch marker management");
+
+    /* pass2 and failure_mssg2 are file scope test status variables --
+     * pass2 starts TRUE, and a diagnostic string is recorded in
+     * failure_mssg2 whenever pass2 is set FALSE below.
+     */
+    pass2 = TRUE;
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* allocate a (deliberately small) cache -- the initial auto resize
+     * configuration applied immediately below resizes it to 512 KB max /
+     * 256 KB min clean (min_clean_fraction of 0.5).
+     */
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (256 * 1024) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "bad cache size after initialization.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+    /* Now make sure that we are managing the epoch markers correctly.
+     */
+
+    if ( pass2 ) {
+
+        auto_size_ctl.version                = H5C2__CURR_AUTO_SIZE_CTL_VER;
+        auto_size_ctl.rpt_fcn                = test_rpt_fcn;
+
+        auto_size_ctl.set_initial_size       = TRUE;
+        auto_size_ctl.initial_size           = 8 * 1024 * 1024;
+
+        auto_size_ctl.min_clean_fraction     = 0.5;
+
+        auto_size_ctl.max_size               = 8 * 1024 * 1024;
+        auto_size_ctl.min_size               = 512 * 1024;
+
+        auto_size_ctl.epoch_length           = 1000;
+
+
+        auto_size_ctl.incr_mode              = H5C2_incr__off;
+
+        auto_size_ctl.lower_hr_threshold     = 0.75;
+
+        auto_size_ctl.increment              = 2.0;
+
+        auto_size_ctl.apply_max_increment    = TRUE;
+        auto_size_ctl.max_increment          = (4 * 1024 * 1024);
+
+
+        auto_size_ctl.decr_mode              = H5C2_decr__age_out;
+
+        auto_size_ctl.upper_hr_threshold     = 0.995;
+
+        auto_size_ctl.decrement              = 0.5;
+
+        auto_size_ctl.apply_max_decrement    = FALSE;
+        auto_size_ctl.max_decrement          = (1 * 1024 * 1024);
+
+        auto_size_ctl.epochs_before_eviction = 10;
+
+        auto_size_ctl.apply_empty_reserve    = FALSE;
+        auto_size_ctl.empty_reserve          = 0.05;
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 2.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "bad cache size after set resize re-config 1.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* Since we just created the cache, there should be no epoch markers
+     * active.  Verify that this is true.
+     */
+
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 1.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 0;
+        while ( ( pass2 ) && ( i < 1000 ) )
+        {
+            protect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i);
+
+            if ( pass2 ) {
+                unprotect_entry2(cache_ptr, MEDIUM_ENTRY_TYPE, i,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+
+        if ( ( ! rpt_fcn_called ) ||
+             ( rpt_status != in_spec ) ||
+             ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ||
+             ( cache_ptr->index_size != (1 * 1000 * MEDIUM_ENTRY_SIZE) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache size change results 0.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+
+    /* run nine more epochs of 1000 distinct small entry accesses each.
+     * With epochs_before_eviction set to 10, each completed epoch should
+     * add one epoch marker -- the medium entry epoch above was the first,
+     * so j tracks the total active marker count from 2 through 10.
+     */
+    if ( pass2 ) {
+
+        j = 2;
+        while ( ( pass2 ) && ( j <= 10 ) )
+        {
+
+            rpt_fcn_called = FALSE;
+            i = (j - 2) * 1000;
+            while ( ( pass2 ) && ( i < (j - 1) * 1000 ) )
+            {
+                protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+                if ( pass2 ) {
+                    unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+                                     NO_CHANGE, H5C2__NO_FLAGS_SET);
+                }
+                i++;
+            }
+
+            if ( ( ! rpt_fcn_called ) ||
+                 ( rpt_status != in_spec ) ||
+                 ( cache_ptr->epoch_markers_active != j ) ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "Unexpected # of epoch markers 2.\n";
+            }
+
+            j++;
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* we now have a full complement of epoch markers -- see if
+     * we get the expected reduction.
+     */
+    /* NOTE(review): the expected size below includes one MEDIUM_ENTRY_SIZE
+     * term -- presumably one medium entry from the first epoch is still
+     * inside the age-out window; confirm against the H5C2 age-out
+     * implementation.
+     */
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 9000;
+        while ( ( pass2 ) && ( i < 10000 ) )
+        {
+            protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+            if ( pass2 ) {
+                unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+
+        if ( ( ! rpt_fcn_called ) ||
+             ( rpt_status != decrease ) ||
+             ( cache_ptr->max_cache_size !=
+               (10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) ) ||
+             ( cache_ptr->min_clean_size !=
+               ((10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) / 2) ) ||
+             ( cache_ptr->index_size !=
+               (10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache size change results 1.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* now reduce the epochs before eviction, and see if the cache
+     * deletes the extra markers
+     */
+    if ( pass2 ) {
+
+        auto_size_ctl.version                = H5C2__CURR_AUTO_SIZE_CTL_VER;
+        auto_size_ctl.rpt_fcn                = test_rpt_fcn;
+
+        auto_size_ctl.set_initial_size       = TRUE;
+        auto_size_ctl.initial_size           = 8 * 1024 * 1024;
+
+        auto_size_ctl.min_clean_fraction     = 0.5;
+
+        auto_size_ctl.max_size               = 8 * 1024 * 1024;
+        auto_size_ctl.min_size               = 512 * 1024;
+
+        auto_size_ctl.epoch_length           = 1000;
+
+
+        auto_size_ctl.incr_mode              = H5C2_incr__off;
+
+        auto_size_ctl.lower_hr_threshold     = 0.75;
+
+        auto_size_ctl.increment              = 2.0;
+
+        auto_size_ctl.apply_max_increment    = TRUE;
+        auto_size_ctl.max_increment          = (4 * 1024 * 1024);
+
+
+        auto_size_ctl.decr_mode              = H5C2_decr__age_out;
+
+        auto_size_ctl.upper_hr_threshold     = 0.995;
+
+        auto_size_ctl.decrement              = 0.5;
+
+        auto_size_ctl.apply_max_decrement    = FALSE;
+        auto_size_ctl.max_decrement          = (1 * 1024 * 1024);
+
+        auto_size_ctl.epochs_before_eviction = 1;
+
+        auto_size_ctl.apply_empty_reserve    = FALSE;
+        auto_size_ctl.empty_reserve          = 0.05;
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 3.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "bad cache size after set resize re-config 2.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* There should be exactly one active epoch marker at present.
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 1 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 3.\n";
+        }
+    }
+
+    /* Now do an epochs worth of accesses, and verify that everything
+     * not accessed in this epoch gets evicted, and the cache size
+     * is reduced.
+     */
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 9000;
+        while ( ( pass2 ) && ( i < 10000 ) )
+        {
+            protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+            if ( pass2 ) {
+                unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+
+        /* with only one epoch of history retained, the cache should
+         * shrink all the way to its configured min_size.
+         */
+        if ( ( ! rpt_fcn_called ) ||
+             ( rpt_status != decrease ) ||
+             ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (256 * 1024) ) ||
+             ( cache_ptr->index_size != (1 * 1000 * SMALL_ENTRY_SIZE) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache size change results 2.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* There should be exactly one active epoch marker at present...
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 1 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 4.\n";
+        }
+    }
+
+    /* shift the decrement mode to threshold, and verify that we remove
+     * all epoch markers.
+     */
+    if ( pass2 ) {
+
+        auto_size_ctl.version                = H5C2__CURR_AUTO_SIZE_CTL_VER;
+        auto_size_ctl.rpt_fcn                = test_rpt_fcn;
+
+        auto_size_ctl.set_initial_size       = TRUE;
+        auto_size_ctl.initial_size           = 8 * 1024 * 1024;
+
+        auto_size_ctl.min_clean_fraction     = 0.5;
+
+        auto_size_ctl.max_size               = 8 * 1024 * 1024;
+        auto_size_ctl.min_size               = 512 * 1024;
+
+        auto_size_ctl.epoch_length           = 1000;
+
+
+        auto_size_ctl.incr_mode              = H5C2_incr__off;
+
+        auto_size_ctl.lower_hr_threshold     = 0.75;
+
+        auto_size_ctl.increment              = 2.0;
+
+        auto_size_ctl.apply_max_increment    = TRUE;
+        auto_size_ctl.max_increment          = (4 * 1024 * 1024);
+
+
+        auto_size_ctl.decr_mode              = H5C2_decr__threshold;
+
+        auto_size_ctl.upper_hr_threshold     = 0.995;
+
+        auto_size_ctl.decrement              = 0.5;
+
+        auto_size_ctl.apply_max_decrement    = FALSE;
+        auto_size_ctl.max_decrement          = (1 * 1024 * 1024);
+
+        auto_size_ctl.epochs_before_eviction = 1;
+
+        auto_size_ctl.apply_empty_reserve    = FALSE;
+        auto_size_ctl.empty_reserve          = 0.05;
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 4.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "bad cache size after set resize re-config 3.\n";
+        }
+    }
+
+    /* ... and now there should be none.
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 5.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* shift the decrement mode to age out with threshold.  Set epochs
+     * before eviction to 10 again.
+     */
+    if ( pass2 ) {
+
+        auto_size_ctl.version                = H5C2__CURR_AUTO_SIZE_CTL_VER;
+        auto_size_ctl.rpt_fcn                = test_rpt_fcn;
+
+        auto_size_ctl.set_initial_size       = TRUE;
+        auto_size_ctl.initial_size           = 8 * 1024 * 1024;
+
+        auto_size_ctl.min_clean_fraction     = 0.5;
+
+        auto_size_ctl.max_size               = 8 * 1024 * 1024;
+        auto_size_ctl.min_size               = 512 * 1024;
+
+        auto_size_ctl.epoch_length           = 1000;
+
+
+        auto_size_ctl.incr_mode              = H5C2_incr__off;
+
+        auto_size_ctl.lower_hr_threshold     = 0.75;
+
+        auto_size_ctl.increment              = 2.0;
+
+        auto_size_ctl.apply_max_increment    = TRUE;
+        auto_size_ctl.max_increment          = (4 * 1024 * 1024);
+
+
+        auto_size_ctl.decr_mode              = H5C2_decr__age_out_with_threshold;
+
+        auto_size_ctl.upper_hr_threshold     = 0.995;
+
+        auto_size_ctl.decrement              = 0.5;
+
+        auto_size_ctl.apply_max_decrement    = FALSE;
+        auto_size_ctl.max_decrement          = (1 * 1024 * 1024);
+
+        auto_size_ctl.epochs_before_eviction = 10;
+
+        auto_size_ctl.apply_empty_reserve    = FALSE;
+        auto_size_ctl.empty_reserve          = 0.05;
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 5.\n";
+        }
+    }
+
+    /* Verify that there are no active epoch markers.
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 6.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* build up a full set of epoch markers. */
+    if ( pass2 ) {
+
+        j = 1;
+        while ( ( pass2 ) && ( j <= 10 ) )
+        {
+
+            rpt_fcn_called = FALSE;
+            i = (j - 1) * 1000;
+            while ( ( pass2 ) && ( i < j * 1000 ) )
+            {
+                protect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i);
+
+                if ( pass2 ) {
+                    unprotect_entry2(cache_ptr, SMALL_ENTRY_TYPE, i,
+                                     NO_CHANGE, H5C2__NO_FLAGS_SET);
+                }
+                i++;
+            }
+
+            if ( ( ! rpt_fcn_called ) ||
+                 ( rpt_status != in_spec ) ||
+                 ( cache_ptr->epoch_markers_active != j ) ) {
+
+                pass2 = FALSE;
+                failure_mssg2 = "Unexpected # of epoch markers 7.\n";
+            }
+
+            j++;
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* Verify that there are now 10 active epoch markers.
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 10 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 8.\n";
+        }
+    }
+
+    /* shift the decrement mode to off.  This should cause all epoch
+     * markers to be removed.
+     */
+    if ( pass2 ) {
+
+        auto_size_ctl.version                = H5C2__CURR_AUTO_SIZE_CTL_VER;
+        auto_size_ctl.rpt_fcn                = test_rpt_fcn;
+
+        auto_size_ctl.set_initial_size       = TRUE;
+        auto_size_ctl.initial_size           = 8 * 1024 * 1024;
+
+        auto_size_ctl.min_clean_fraction     = 0.5;
+
+        auto_size_ctl.max_size               = 8 * 1024 * 1024;
+        auto_size_ctl.min_size               = 512 * 1024;
+
+        auto_size_ctl.epoch_length           = 1000;
+
+
+        auto_size_ctl.incr_mode              = H5C2_incr__off;
+
+        auto_size_ctl.lower_hr_threshold     = 0.75;
+
+        auto_size_ctl.increment              = 2.0;
+
+        auto_size_ctl.apply_max_increment    = TRUE;
+        auto_size_ctl.max_increment          = (4 * 1024 * 1024);
+
+
+        auto_size_ctl.decr_mode              = H5C2_decr__off;
+
+        auto_size_ctl.upper_hr_threshold     = 0.995;
+
+        auto_size_ctl.decrement              = 0.5;
+
+        auto_size_ctl.apply_max_decrement    = FALSE;
+        auto_size_ctl.max_decrement          = (1 * 1024 * 1024);
+
+        auto_size_ctl.epochs_before_eviction = 10;
+
+        auto_size_ctl.apply_empty_reserve    = FALSE;
+        auto_size_ctl.empty_reserve          = 0.05;
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 6.\n";
+        }
+    }
+
+    /* Verify that there are now no active epoch markers.
+     */
+    if ( pass2 ) {
+
+        if ( cache_ptr->epoch_markers_active != 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected # of epoch markers 9.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    /* verify that we still have the expected number of entries in the cache,
+     * and that the cache is of the expected size.
+     */
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (4 * 1024 * 1024) )||
+             ( cache_ptr->index_size != (10 * 1000 * SMALL_ENTRY_SIZE) ) ||
+             ( cache_ptr->index_len != 10000 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache size change results 3.\n";
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_auto_cache_resize_epoch_markers() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_auto_cache_resize_input_errs()
+ *
+ * Purpose: Verify that H5C2_set_cache_auto_resize_config() detects
+ * and rejects invalid input.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/29/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Field-by-field equality test for two H5C2_auto_size_ctl_t structures
+ * (a) and (b).  The set_initial_size and initial_size fields are only
+ * compared when compare_init is TRUE; the callers in this file pass
+ * FALSE when checking a configuration read back with
+ * H5C2_get_cache_auto_resize_config().
+ *
+ * NOTE: all three arguments are evaluated multiple times -- pass only
+ * side-effect free expressions.
+ */
+#define RESIZE_CONFIGS_ARE_EQUAL(a, b, compare_init) \
+( ( (a).version == (b).version ) && \
+  ( (a).rpt_fcn == (b).rpt_fcn ) && \
+  ( ( ! (compare_init) ) || \
+    ( (a).set_initial_size == (b).set_initial_size ) ) && \
+  ( ( ! (compare_init) ) || \
+    ( (a).initial_size == (b).initial_size ) ) && \
+  ( (a).min_clean_fraction == (b).min_clean_fraction ) && \
+  ( (a).max_size == (b).max_size ) && \
+  ( (a).min_size == (b).min_size ) && \
+  ( (a).epoch_length == (b).epoch_length ) && \
+  ( (a).incr_mode == (b).incr_mode ) && \
+  ( (a).lower_hr_threshold == (b).lower_hr_threshold ) && \
+  ( (a).increment == (b).increment ) && \
+  ( (a).apply_max_increment == (b).apply_max_increment ) && \
+  ( (a).max_increment == (b).max_increment ) && \
+  ( (a).decr_mode == (b).decr_mode ) && \
+  ( (a).upper_hr_threshold == (b).upper_hr_threshold ) && \
+  ( (a).decrement == (b).decrement ) && \
+  ( (a).apply_max_decrement == (b).apply_max_decrement ) && \
+  ( (a).max_decrement == (b).max_decrement ) && \
+  ( (a).epochs_before_eviction == (b).epochs_before_eviction ) && \
+  ( (a).apply_empty_reserve == (b).apply_empty_reserve ) && \
+  ( (a).empty_reserve == (b).empty_reserve ) )
+
+static void
+check_auto_cache_resize_input_errs(void)
+{
+ const char * fcn_name = "check_auto_cache_resize_input_errs()";
+ herr_t result;
+ H5C2_t * cache_ptr = NULL;
+ H5C2_auto_size_ctl_t ref_auto_size_ctl =
+ {
+ /* int32_t version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* H5C2_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn,
+
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (512 * 1024),
+
+ /* double min_clean_fraction = */ 0.5,
+
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ (512 * 1024),
+
+ /* int64_t epoch_length = */ 1000,
+
+
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+
+ /* double lower_hr_threshold = */ 0.75,
+
+ /* double increment = */ 2.0,
+
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+
+
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+
+ /* double upper_hr_threshold = */ 0.995,
+
+ /* double decrement = */ 0.1,
+
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+
+ /* int32_t epochs_before_eviction = */ 3,
+
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.05
+ };
+
+ H5C2_auto_size_ctl_t invalid_auto_size_ctl;
+ H5C2_auto_size_ctl_t test_auto_size_ctl;
+
+ TESTING("automatic cache resize input errors");
+
+ pass2 = TRUE;
+
+ /* allocate a cache, and set a reference automatic cache control
+ * configuration. Then feed H5C2_set_cache_auto_resize_config()
+ * invalid input, and verify that the correct error is returned,
+ * and that the configuration is not modified.
+ */
+
+ if ( pass2 ) {
+
+ reset_entries2();
+
+ cache_ptr = setup_cache2((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &ref_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( cache_ptr->max_cache_size != (512 * 1024) ) ||
+ ( cache_ptr->min_clean_size != (256 * 1024) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "bad cache size after initialization.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 1.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 1.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.7;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(NULL,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted NULL cache_ptr.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 2.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 2.";
+ }
+ }
+
+
+ /* check bad version rejection. */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = -1; /* INVALID */
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.7;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad version.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 3.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 3.";
+ }
+ }
+
+
+ /* check bad initial size rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 16 * 1024 * 1024 + 1;
+ /* INVALID */
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad init size 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 4.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 4.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 1 * 1024 * 1024 - 1;
+ /* INVALID */
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad init size 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 5.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 5.";
+ }
+ }
+
+
+ /* test for invalid min clean fraction rejection. */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 1.00001; /* INVALID */
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad min clean frac 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 6.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 6.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = -0.00001; /* INVALID */
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad min clean frac 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 7.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 7.";
+ }
+ }
+
+
+ /* test for invalid max_size and/or min_size rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = H5C2__MAX_MAX_CACHE_SIZE + 1;
+ /* INVALID */
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad max_size.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 8.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 8.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 1 * 1024 * 1024;/* INVALID */
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024 + 1;/*PAIR */
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad size pair.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 9.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 9.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = H5C2__MIN_MAX_CACHE_SIZE - 1;
+ /* INVALID */
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad min_size.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 10.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 10.";
+ }
+ }
+
+
+ /* test for invalid epoch_length rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = H5C2__MAX_AR_EPOCH_LENGTH + 1;
+ /* INVALID */
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad epoch len 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 11.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 11.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = H5C2__MIN_AR_EPOCH_LENGTH - 1;
+ /* INVALID */
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad epoch len 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 12.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 12.";
+ }
+ }
+
+
+ /* test for bad incr_mode rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode =
+ (enum H5C2_cache_incr_mode) -1; /* INVALID */
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad incr_mode 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 13.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 13.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode =
+ (enum H5C2_cache_incr_mode) 2; /* INVALID */
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad incr_mode 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 14.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 14.";
+ }
+ }
+
+
+ /* check for bad upper and/or lower threshold rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.7;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 1.01; /* INVALID */
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad upper threshold.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 15.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 15.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.8; /* INVALID */
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.7; /* INVALID */
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad threshold pair.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 16.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 16.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.5;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = -0.0001; /* INVALID */
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad lower threshold.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 17.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 17.";
+ }
+ }
+
+
+ /* test for bad increment rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 0.99999; /* INVALID */
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.5;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad increment.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 18.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 18.";
+ }
+ }
+
+
+ /* test for bad decr_mode rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode =
+ (enum H5C2_cache_decr_mode) -1; /* INVALID */
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad decr_mode 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 19.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 19.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode =
+ (enum H5C2_cache_decr_mode) 4; /* INVALID */
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad decr_mode 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 20.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 20.";
+ }
+ }
+
+
+ /* check for bad decrement rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 1.000001; /* INVALID */
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad decrement 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 21.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 21.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = -0.000001; /* INVALID */
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_set_cache_auto_resize_config accepted bad decrement 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 22.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 22.";
+ }
+ }
+
+
+ /* check for rejection of bad epochs_before_eviction */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 0; /* INVALID */
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config accepted bad epochs_before_eviction 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 23.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 23.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__age_out_with_threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction =
+ H5C2__MAX_EPOCH_MARKERS + 1; /* INVALID */
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config accepted bad epochs_before_eviction 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 24.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 24.";
+ }
+ }
+
+
+ /* Check for bad apply_empty_reserve rejection */
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__age_out;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = -0.0000001; /* INVALID */
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config accepted bad empty_reserve 1.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 25.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 25.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ invalid_auto_size_ctl.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ invalid_auto_size_ctl.rpt_fcn = NULL;
+
+ invalid_auto_size_ctl.set_initial_size = TRUE;
+ invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+ invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+ invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+ invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+ invalid_auto_size_ctl.epoch_length = 5000;
+
+
+ invalid_auto_size_ctl.incr_mode = H5C2_incr__threshold;
+
+ invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+ invalid_auto_size_ctl.increment = 2.0;
+
+ invalid_auto_size_ctl.apply_max_increment = TRUE;
+ invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+ invalid_auto_size_ctl.decr_mode = H5C2_decr__age_out_with_threshold;
+
+ invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+ invalid_auto_size_ctl.decrement = 0.9;
+
+ invalid_auto_size_ctl.apply_max_decrement = TRUE;
+ invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+ invalid_auto_size_ctl.epochs_before_eviction =
+ H5C2__MAX_EPOCH_MARKERS + 1; /* INVALID */
+
+ invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+ invalid_auto_size_ctl.empty_reserve = 0.05;
+
+ result = H5C2_set_cache_auto_resize_config(cache_ptr,
+ &invalid_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_set_cache_auto_resize_config accepted bad empty_reserve 2.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr,
+ &test_auto_size_ctl);
+
+ if ( result != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_get_cache_auto_resize_config failed 26.";
+
+ } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+ ref_auto_size_ctl, FALSE) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Unexpected auto resize config 26.";
+ }
+ }
+
+
+ /* finally, before we finish, try feeding
+ * H5C2_get_cache_auto_resize_config invalid data.
+ */
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(NULL, &test_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_get_cache_auto_resize_config accepted NULL cache_ptr.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config((H5C2_t *)&test_auto_size_ctl,
+ &test_auto_size_ctl);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_get_cache_auto_resize_config accepted bad cache_ptr.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ result = H5C2_get_cache_auto_resize_config(cache_ptr, NULL);
+
+ if ( result != FAIL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5C2_get_cache_auto_resize_config accepted NULL config ptr.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ takedown_cache2(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 ) {
+
+ HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+ }
+
+ return;
+
+} /* check_auto_cache_resize_input_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_auto_cache_resize_aux_fcns()
+ *
+ * Purpose:     Verify that the auxiliary functions associated with
+ * the automatic cache resize capability are operating
+ * correctly. These functions are:
+ *
+ * H5C2_get_cache_size()
+ * H5C2_get_cache_hit_rate()
+ * H5C2_reset_cache_hit_rate_stats()
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 11/4/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_auto_cache_resize_aux_fcns(void)
+{
+    const char * fcn_name = "check_auto_cache_resize_aux_fcns()";
+    herr_t result;
+    int32_t i;
+    H5C2_t * cache_ptr = NULL;
+    double hit_rate;
+    size_t max_size;
+    size_t min_clean_size;
+    size_t cur_size;
+    int32_t cur_num_entries;
+    /* Auto-resize configuration with both increment and decrement modes
+     * switched off -- this test only exercises the query/reset helpers,
+     * not the resize machinery itself.
+     */
+    H5C2_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+#if 1
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C2_auto_resize_report_fcn rpt_fcn = */ H5C2_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t set_initial_size = */ TRUE,
+        /* size_t initial_size = */ (1 * 1024 * 1024),
+
+        /* double min_clean_fraction = */ 0.5,
+
+        /* NOTE(review): "1025" below looks like a typo for 1024 (16 MB).
+         * Harmless for this test, since max_size need only bound
+         * initial_size, but confirm against the H5C version of this test.
+         */
+        /* size_t max_size = */ (16 * 1024 * 1025),
+        /* size_t min_size = */ (512 * 1024),
+
+        /* int64_t epoch_length = */ 50000,
+
+
+        /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__off,
+
+        /* double lower_hr_threshold = */ 0.75,
+
+        /* double increment = */ 2.0,
+
+        /* hbool_t apply_max_increment = */ TRUE,
+        /* size_t max_increment = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
+
+        /* double upper_hr_threshold = */ 0.995,
+
+        /* double decrement = */ 0.9,
+
+        /* hbool_t apply_max_decrement = */ TRUE,
+        /* size_t max_decrement = */ (1 * 1024 * 1024),
+
+        /* int32_t epochs_before_eviction = */ 3,
+
+        /* hbool_t apply_empty_reserve = */ TRUE,
+        /* double empty_reserve = */ 0.5
+    };
+
+
+    TESTING("automatic cache resize auxilary functions");
+
+    pass2 = TRUE;
+
+    /* allocate a cache, and then test the various auxiliary functions.
+     */
+
+    if ( pass2 ) {
+
+        reset_entries2();
+
+        cache_ptr = setup_cache2((size_t)(2 * 1024),
+                                 (size_t)(1 * 1024));
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_set_cache_auto_resize_config(cache_ptr,
+                                                   &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    /* The config above should have resized the cache to initial_size
+     * (1 MB) with min_clean_size = min_clean_fraction * initial_size.
+     */
+    if ( pass2 ) {
+
+        if ( ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (512 * 1024) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "bad cache size after initialization.\n";
+        }
+    }
+
+    /* lets start with the H5C2_get_cache_hit_rate(),
+     * H5C2_reset_cache_hit_rate_stats() pair.
+     */
+
+    if ( pass2 ) {
+
+        if ( ( H5C2_get_cache_hit_rate(NULL, &hit_rate) != FAIL ) ||
+             ( H5C2_get_cache_hit_rate(cache_ptr, NULL) != FAIL ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_hit_rate accepts bad params.\n";
+        }
+    }
+
+    /* No accesses yet -- hit rate must start at exactly 0.0. */
+    if ( pass2 ) {
+
+        result = H5C2_get_cache_hit_rate(cache_ptr, &hit_rate);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_hit_rate failed.\n";
+
+        } else if ( hit_rate != 0.0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_hit_rate returned unexpected hit rate 1.\n";
+        }
+    }
+
+    /* Touch 1000 distinct pico entries: every access is a miss, so the
+     * hit rate must remain 0.0 (1000 accesses / 0 hits).
+     */
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 0;
+        while ( ( pass2 ) && ( i < 1000 ) )
+        {
+            protect_entry2(cache_ptr, PICO_ENTRY_TYPE, i);
+
+            if ( pass2 ) {
+
+                unprotect_entry2(cache_ptr, PICO_ENTRY_TYPE, i,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_get_cache_hit_rate(cache_ptr, &hit_rate);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_hit_rate failed.\n";
+
+        } else if ( hit_rate != 0.0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_hit_rate returned unexpected hit rate 2.\n";
+
+        } else if ( ( cache_ptr->cache_accesses != 1000 ) ||
+                    ( cache_ptr->cache_hits != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache hit rate stats.\n";
+
+        } else if ( rpt_fcn_called ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Report function called?.\n";
+
+        }
+    }
+
+    /* Re-access entry 0 a thousand times: all hits, bringing the overall
+     * rate to 1000 hits / 2000 accesses = 0.5 -- exactly representable
+     * in a double, so the == comparison below is safe.
+     */
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 0;
+        while ( ( pass2 ) && ( i < 1000 ) )
+        {
+            protect_entry2(cache_ptr, PICO_ENTRY_TYPE, 0);
+
+            if ( pass2 ) {
+
+                unprotect_entry2(cache_ptr, PICO_ENTRY_TYPE, 0,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+    }
+
+    if ( pass2 ) {
+
+        result = H5C2_get_cache_hit_rate(cache_ptr, &hit_rate);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_hit_rate failed.\n";
+
+        } else if ( hit_rate != 0.5 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_hit_rate returned unexpected hit rate 3.\n";
+
+        } else if ( ( cache_ptr->cache_accesses != 2000 ) ||
+                    ( cache_ptr->cache_hits != 1000 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache hit rate stats.\n";
+
+        } else if ( rpt_fcn_called ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Report function called?.\n";
+
+        }
+    }
+
+    /* A failed (NULL cache_ptr) reset call must leave the stats intact. */
+    if ( pass2 ) {
+
+        result = H5C2_reset_cache_hit_rate_stats(NULL);
+
+        if ( result != FAIL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_reset_cache_hit_rate_stats accepted NULL cache_ptr.\n";
+
+        } else if ( ( cache_ptr->cache_accesses != 2000 ) ||
+                    ( cache_ptr->cache_hits != 1000 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "Failed call to H5C2_reset_cache_hit_rate_stats altered stats?\n";
+        }
+    }
+
+    /* A successful reset must zero both counters. */
+    if ( pass2 ) {
+
+        result = H5C2_reset_cache_hit_rate_stats(cache_ptr);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_reset_cache_hit_rate_stats failed.\n";
+
+        } else if ( ( cache_ptr->cache_accesses != 0 ) ||
+                    ( cache_ptr->cache_hits != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache hit rate stats.\n";
+
+        }
+    }
+
+    /* Access ids 500-1499: ids 500-999 are already resident (500 hits),
+     * ids 1000-1499 are new (500 misses) -- so the freshly reset stats
+     * should show a 0.5 hit rate over 1000 accesses.
+     */
+    if ( pass2 ) {
+
+        rpt_fcn_called = FALSE;
+        i = 0;
+        while ( ( pass2 ) && ( i < 1000 ) )
+        {
+            protect_entry2(cache_ptr, PICO_ENTRY_TYPE, i + 500);
+
+            if ( pass2 ) {
+
+                unprotect_entry2(cache_ptr, PICO_ENTRY_TYPE, i + 500,
+                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
+            }
+            i++;
+        }
+    }
+
+
+    if ( pass2 ) {
+
+        result = H5C2_get_cache_hit_rate(cache_ptr, &hit_rate);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_hit_rate failed.\n";
+
+        } else if ( hit_rate != 0.5 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_hit_rate returned unexpected hit rate 4.\n";
+
+        } else if ( ( cache_ptr->cache_accesses != 1000 ) ||
+                    ( cache_ptr->cache_hits != 500 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected cache hit rate stats.\n";
+
+        } else if ( rpt_fcn_called ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Report function called?.\n";
+
+        }
+    }
+
+    /***************************************************
+     * So much for testing H5C2_get_cache_hit_rate() and
+     * H5C2_reset_cache_hit_rate_stats(). Now on to
+     * H5C2_get_cache_size().
+     ***************************************************/
+
+    if ( pass2 ) {
+
+        result = H5C2_get_cache_size(NULL, &max_size, &min_clean_size,
+                                     &cur_size, &cur_num_entries);
+
+        if ( result != FAIL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size accepted NULL cache_ptr.\n";
+        }
+    }
+
+    /* All four outputs at once -- ids 0-1499 are resident at this point,
+     * hence 1500 pico entries.
+     */
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, &max_size, &min_clean_size,
+                                     &cur_size, &cur_num_entries);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 1.\n";
+
+        } else if ( max_size != (1 * 1024 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected max_size 1.\n";
+
+        } else if ( min_clean_size != (512 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected min_clean_size 1.\n";
+
+        } else if ( cur_size != (1500 * PICO_ENTRY_SIZE) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_size 1.\n";
+
+        } else if ( cur_num_entries != 1500 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_num_entries 1.\n";
+        }
+    }
+
+    /* read a larger entry so that cur_size and cur_num_entries will be
+     * different.
+     */
+    if ( pass2 ) {
+
+        protect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0);
+    }
+
+    if ( pass2 ) {
+        unprotect_entry2(cache_ptr, MONSTER_ENTRY_TYPE, 0, NO_CHANGE,
+                         H5C2__NO_FLAGS_SET);
+    }
+
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, &max_size, &min_clean_size,
+                                     &cur_size, &cur_num_entries);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 2.\n";
+
+        } else if ( max_size != (1 * 1024 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected max_size 2.\n";
+
+        } else if ( min_clean_size != (512 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected min_clean_size 2.\n";
+
+        } else if ( cur_size !=
+                   ((1500 * PICO_ENTRY_SIZE) + MONSTER_ENTRY_SIZE) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_size 2.\n";
+
+        } else if ( cur_num_entries != 1501 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_num_entries 2.\n";
+        }
+    }
+
+    /* The remaining H5C2_get_cache_size() calls verify that NULL output
+     * parameters are skipped -- each call requests exactly one value and
+     * checks that the other (pre-zeroed) locals were left untouched.
+     */
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, &max_size, NULL, NULL, NULL);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 3.\n";
+
+        } else if ( max_size != (1 * 1024 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected max_size 3.\n";
+
+        } else if ( ( min_clean_size != 0 ) ||
+                    ( cur_size != 0 ) ||
+                    ( cur_num_entries != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Phantom returns from H5C2_get_cache_size?\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, NULL, &min_clean_size,
+                                     NULL, NULL);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 4.\n";
+
+        } else if ( min_clean_size != (512 * 1024) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected min_clean_size 4.\n";
+
+        } else if ( ( max_size != 0 ) ||
+                    ( cur_size != 0 ) ||
+                    ( cur_num_entries != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Phantom returns from H5C2_get_cache_size?\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, NULL, NULL, &cur_size, NULL);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 5.\n";
+
+        } else if ( cur_size !=
+                   ((1500 * PICO_ENTRY_SIZE) + MONSTER_ENTRY_SIZE) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_size 5.\n";
+
+        } else if ( ( max_size != 0 ) ||
+                    ( min_clean_size != 0 ) ||
+                    ( cur_num_entries != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Phantom returns from H5C2_get_cache_size?\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        max_size = 0;
+        min_clean_size = 0;
+        cur_size = 0;
+        cur_num_entries = 0;
+
+        result = H5C2_get_cache_size(cache_ptr, NULL, NULL, NULL,
+                                     &cur_num_entries);
+
+        if ( result != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5C2_get_cache_size failed 6.\n";
+
+        } else if ( cur_num_entries != 1501 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5C2_get_cache_size reports unexpected cur_num_entries 2.\n";
+
+        } else if ( ( max_size != 0 ) ||
+                    ( min_clean_size != 0 ) ||
+                    ( cur_size != 0 ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Phantom returns from H5C2_get_cache_size?\n";
+
+        }
+    }
+
+    if ( pass2 ) {
+
+        takedown_cache2(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 ) {
+
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+    }
+
+    return;
+
+} /* check_auto_cache_resize_aux_fcns() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose:     Run tests on the cache code contained in H5C2.c
+ *
+ * Return: Success:
+ *
+ * Failure:
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(void)
+{
+    /* Initialize the HDF5 library before exercising the cache code. */
+    H5open();
+
+    /* FALSE => do not skip the long-running tests in this standalone
+     * driver (flag presumably consumed by the cache2_common helpers --
+     * confirm there).
+     */
+    skip_long_tests2 = FALSE;
+
+    /* Run the full-length test variants only in optimized (NDEBUG)
+     * builds; debug builds run the abbreviated versions.
+     */
+#ifdef NDEBUG
+    run_full_test2 = TRUE;
+#else /* NDEBUG */
+    run_full_test2 = FALSE;
+#endif /* NDEBUG */
+
+    /* Flip the "#if 0" below to force the full tests in a debug build. */
+#if 0
+    run_full_test2 = TRUE;
+#endif
+
+    /* The smoke checks and the write-permitted check are currently
+     * compiled out via "#if 0".
+     */
+#if 0
+    smoke_check_1();
+    smoke_check_2();
+    smoke_check_3();
+    smoke_check_4();
+    smoke_check_5();
+    smoke_check_6();
+    smoke_check_7();
+    smoke_check_8();
+    smoke_check_9();
+    smoke_check_10();
+#endif
+#if 0
+    write_permitted_check();
+#endif
+    check_insert_entry();
+    check_flush_cache();
+    check_get_entry_status();
+    check_expunge_entry();
+    check_multiple_read_protect();
+    check_rename_entry();
+    check_pin_protected_entry();
+    check_resize_entry();
+    check_evictions_enabled();
+    check_flush_protected_err();
+    check_destroy_pinned_err();
+    check_destroy_protected_err();
+    check_duplicate_insert_err();
+    check_rename_err();
+    check_double_pin_err();
+    check_double_unpin_err();
+    check_pin_entry_errs();
+    check_double_protect_err();
+    check_double_unprotect_err();
+    check_mark_entry_dirty_errs();
+    check_expunge_entry_errs();
+    check_resize_entry_errs();
+    check_unprotect_ro_dirty_err();
+    check_protect_ro_rw_err();
+    check_check_evictions_enabled_err();
+    check_auto_cache_resize();
+    check_auto_cache_resize_disable();
+    check_auto_cache_resize_epoch_markers();
+    check_auto_cache_resize_input_errs();
+    check_auto_cache_resize_aux_fcns();
+
+    /* NOTE(review): the individual checks record failures via pass2 /
+     * failure_mssg2 and report them themselves; the driver always exits 0.
+     */
+    return(0);
+
+} /* main() */
diff --git a/test/cache2_api.c b/test/cache2_api.c
new file mode 100644
index 0000000..31a836a
--- /dev/null
+++ b/test/cache2_api.c
@@ -0,0 +1,3791 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 11/10/05
+ *
+ * This file contains tests for the API calls associated
+ * with the cache implemented in H5C2.c
+ */
+
+#include "h5test.h"
+#include "H5Iprivate.h"
+#include "H5AC2private.h"
+#include "cache2_common.h"
+
+
+/* global variable declarations: */
+
+/* NULL terminated list of base file names used by the tests in this
+ * file.  The names are expanded to full paths via h5_fixname() before
+ * use.
+ */
+const char *FILENAMES[] = {
+    "cache_test",
+    "cache_api_test",
+    NULL
+};
+
+/* macro definitions */
+
+/* Compare two H5C2_auto_size_ctl_t (internal cache configuration)
+ * structures field by field; evaluates to TRUE iff they match.
+ *
+ * The set_initial_size / initial_size fields are compared only when
+ * compare_init is TRUE -- the cache reports its current size rather
+ * than the originally configured initial size, so these fields may
+ * legitimately differ.
+ */
+#define RESIZE_CONFIGS_ARE_EQUAL(a, b, compare_init)                \
+( ( (a).version                == (b).version ) &&                  \
+  ( (a).rpt_fcn                == (b).rpt_fcn ) &&                  \
+  ( ( ! compare_init ) ||                                           \
+    ( (a).set_initial_size     == (b).set_initial_size ) ) &&       \
+  ( ( ! compare_init ) ||                                           \
+    ( (a).initial_size         == (b).initial_size ) ) &&           \
+  ( (a).min_clean_fraction     == (b).min_clean_fraction ) &&       \
+  ( (a).max_size               == (b).max_size ) &&                 \
+  ( (a).min_size               == (b).min_size ) &&                 \
+  ( (a).epoch_length           == (b).epoch_length ) &&             \
+  ( (a).incr_mode              == (b).incr_mode ) &&                \
+  ( (a).lower_hr_threshold     == (b).lower_hr_threshold ) &&       \
+  ( (a).increment              == (b).increment ) &&                \
+  ( (a).apply_max_increment    == (b).apply_max_increment ) &&      \
+  ( (a).max_increment          == (b).max_increment ) &&            \
+  ( (a).decr_mode              == (b).decr_mode ) &&                \
+  ( (a).upper_hr_threshold     == (b).upper_hr_threshold ) &&       \
+  ( (a).decrement              == (b).decrement ) &&                \
+  ( (a).apply_max_decrement    == (b).apply_max_decrement ) &&      \
+  ( (a).max_decrement          == (b).max_decrement ) &&            \
+  ( (a).epochs_before_eviction == (b).epochs_before_eviction ) &&   \
+  ( (a).apply_empty_reserve    == (b).apply_empty_reserve ) &&      \
+  ( (a).empty_reserve          == (b).empty_reserve ) )
+
+
+/* private function declarations: */
+
+/* test H5Pset/get_mdc_config() on file access property lists */
+static void check_fapl_mdc_api_calls(void);
+
+/* verify that a file's internal and external cache configurations
+ * both match *ext_config_ptr
+ */
+static void validate_mdc_config(hid_t file_id,
+                                H5AC2_cache_config_t * ext_config_ptr,
+                                hbool_t compare_init,
+                                int test_num);
+
+/* test H5Fset/get_mdc_config() and the cache monitoring calls */
+static void check_file_mdc_api_calls(void);
+
+/* fetch, validate, and reset the cache hit rate via the public API */
+static void check_and_validate_cache_hit_rate(hid_t file_id,
+                                              double * hit_rate_ptr,
+                                              hbool_t dump_data,
+                                              int64_t min_accesses,
+                                              double min_hit_rate);
+
+/* fetch and validate the cache size data via the public API */
+static void check_and_validate_cache_size(hid_t file_id,
+                                          size_t * max_size_ptr,
+                                          size_t * min_clean_size_ptr,
+                                          size_t * cur_size_ptr,
+                                          int32_t * cur_num_entries_ptr,
+                                          hbool_t dump_data);
+#if 0
+/* This test can't be run until we start using H5C2, so comment
+ * out declaration for now.
+ */
+static void mdc_api_call_smoke_check(void);
+#endif
+
+/* test error rejection in the FAPL mdc config API calls */
+static void check_fapl_mdc_api_errs(void);
+
+/* test error rejection in the file mdc config API calls */
+static void check_file_mdc_api_errs(void);
+
+
+/**************************************************************************/
+/**************************************************************************/
+/********************************* tests: *********************************/
+/**************************************************************************/
+/**************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: check_fapl_mdc_api_calls()
+ *
+ * Purpose: Verify that the file access property list related
+ * metadata cache related API calls are functioning
+ * correctly.
+ *
+ * Since we have tested the H5C2 code elsewhere, it should
+ * be sufficient to verify that the desired configuration
+ * data is getting to the cache.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/12/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Compare two H5AC2_cache_config_t (external cache configuration)
+ * structures field by field; evaluates to TRUE iff they match.
+ *
+ * The trace file name is compared only when open_trace_file is TRUE.
+ * set_initial_size is compared only when cmp_set_init is TRUE, and
+ * initial_size only when cmp_init_size is TRUE -- configurations
+ * returned by the cache at run time report current rather than
+ * configured values for these fields.
+ */
+#define CACHE_CONFIGS_EQUAL(a, b, cmp_set_init, cmp_init_size)       \
+  ( ( (a).version                == (b).version ) &&                 \
+    ( (a).rpt_fcn_enabled        == (b).rpt_fcn_enabled ) &&         \
+    ( (a).open_trace_file        == (b).open_trace_file ) &&         \
+    ( (a).close_trace_file       == (b).close_trace_file ) &&        \
+    ( ( (a).open_trace_file == FALSE ) ||                            \
+      ( strcmp((a).trace_file_name, (b).trace_file_name) == 0 ) ) && \
+    ( (a).evictions_enabled      == (b).evictions_enabled ) &&       \
+    ( ( ! cmp_set_init ) ||                                          \
+      ( (a).set_initial_size     == (b).set_initial_size ) ) &&      \
+    ( ( ! cmp_init_size ) ||                                         \
+      ( (a).initial_size         == (b).initial_size ) ) &&          \
+    ( (a).min_clean_fraction     == (b).min_clean_fraction ) &&      \
+    ( (a).max_size               == (b).max_size ) &&                \
+    ( (a).min_size               == (b).min_size ) &&                \
+    ( (a).epoch_length           == (b).epoch_length ) &&            \
+    ( (a).incr_mode              == (b).incr_mode ) &&               \
+    ( (a).lower_hr_threshold     == (b).lower_hr_threshold ) &&      \
+    ( (a).increment              == (b).increment ) &&               \
+    ( (a).apply_max_increment    == (b).apply_max_increment ) &&     \
+    ( (a).max_increment          == (b).max_increment ) &&           \
+    ( (a).decr_mode              == (b).decr_mode ) &&               \
+    ( (a).upper_hr_threshold     == (b).upper_hr_threshold ) &&      \
+    ( (a).decrement              == (b).decrement ) &&               \
+    ( (a).apply_max_decrement    == (b).apply_max_decrement ) &&     \
+    ( (a).max_decrement          == (b).max_decrement ) &&           \
+    ( (a).epochs_before_eviction == (b).epochs_before_eviction ) &&  \
+    ( (a).apply_empty_reserve    == (b).apply_empty_reserve ) &&     \
+    ( (a).empty_reserve          == (b).empty_reserve ) )
+
+/* Translate an external H5AC2_cache_config_t (e) into the equivalent
+ * internal H5C2_auto_size_ctl_t (i).  The boolean rpt_fcn_enabled flag
+ * maps to either the default resize report function or NULL; all other
+ * fields copy across directly (with narrowing casts for epoch_length
+ * and epochs_before_eviction).  Note that the external-only fields
+ * (trace file controls, evictions_enabled, dirty_bytes_threshold)
+ * have no internal counterpart and are dropped.
+ */
+#define XLATE_EXT_TO_INT_MDC_CONFIG(i, e)                           \
+{                                                                   \
+    (i).version                = H5C2__CURR_AUTO_SIZE_CTL_VER;      \
+    if ( (e).rpt_fcn_enabled )                                      \
+        (i).rpt_fcn            = H5C2_def_auto_resize_rpt_fcn;      \
+    else                                                            \
+        (i).rpt_fcn            = NULL;                              \
+    (i).set_initial_size       = (e).set_initial_size;              \
+    (i).initial_size           = (e).initial_size;                  \
+    (i).min_clean_fraction     = (e).min_clean_fraction;            \
+    (i).max_size               = (e).max_size;                      \
+    (i).min_size               = (e).min_size;                      \
+    (i).epoch_length           = (long int)((e).epoch_length);      \
+    (i).incr_mode              = (e).incr_mode;                     \
+    (i).lower_hr_threshold     = (e).lower_hr_threshold;            \
+    (i).increment              = (e).increment;                     \
+    (i).apply_max_increment    = (e).apply_max_increment;           \
+    (i).max_increment          = (e).max_increment;                 \
+    (i).decr_mode              = (e).decr_mode;                     \
+    (i).upper_hr_threshold     = (e).upper_hr_threshold;            \
+    (i).decrement              = (e).decrement;                     \
+    (i).apply_max_decrement    = (e).apply_max_decrement;           \
+    (i).max_decrement          = (e).max_decrement;                 \
+    (i).epochs_before_eviction = (int)((e).epochs_before_eviction); \
+    (i).apply_empty_reserve    = (e).apply_empty_reserve;           \
+    (i).empty_reserve          = (e).empty_reserve;                 \
+}
+
+static void
+check_fapl_mdc_api_calls(void)
+{
+    const char * fcn_name = "check_fapl_mdc_api_calls()";
+    char filename[512];
+    herr_t result;
+    hid_t fapl_id = -1;
+    hid_t test_fapl_id = -1;
+    hid_t file_id = -1;
+    H5F_t * file_ptr = NULL;
+    H5C2_t * cache_ptr = NULL;
+    H5AC2_cache_config_t default_config = H5AC2__DEFAULT_CACHE_CONFIG;
+    H5AC2_cache_config_t mod_config =
+    {
+      /* int         version                = */
+                                           H5AC2__CURR_CACHE_CONFIG_VERSION,
+      /* hbool_t     rpt_fcn_enabled        = */ FALSE,
+      /* hbool_t     open_trace_file        = */ FALSE,
+      /* hbool_t     close_trace_file       = */ FALSE,
+      /* char        trace_file_name[]      = */ "",
+      /* hbool_t     evictions_enabled      = */ TRUE,
+      /* hbool_t     set_initial_size       = */ TRUE,
+      /* size_t      initial_size           = */ (1 * 1024 * 1024 + 1),
+      /* double      min_clean_fraction     = */ 0.2,
+      /* size_t      max_size               = */ (16 * 1024 * 1024 + 1),
+      /* size_t      min_size               = */ ( 1 * 1024 * 1024 + 1),
+      /* long int    epoch_length           = */ 50001,
+      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+      /* double      lower_hr_threshold     = */ 0.91,
+      /* double      increment              = */ 2.1,
+      /* hbool_t     apply_max_increment    = */ TRUE,
+      /* size_t      max_increment          = */ (4 * 1024 * 1024 + 1),
+      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out,
+      /* double      upper_hr_threshold     = */ 0.998,
+      /* double      decrement              = */ 0.91,
+      /* hbool_t     apply_max_decrement    = */ TRUE,
+      /* size_t      max_decrement          = */ (1 * 1024 * 1024 - 1),
+      /* int         epochs_before_eviction = */ 4,
+      /* hbool_t     apply_empty_reserve    = */ TRUE,
+      /* double      empty_reserve          = */ 0.05,
+      /* int         dirty_bytes_threshold  = */ (256 * 1024)
+    };
+    H5AC2_cache_config_t scratch;
+    H5C2_auto_size_ctl_t default_auto_size_ctl;
+    H5C2_auto_size_ctl_t mod_auto_size_ctl;
+
+    TESTING("MDC/FAPL related API calls");
+
+    pass2 = TRUE;
+
+    XLATE_EXT_TO_INT_MDC_CONFIG(default_auto_size_ctl, default_config)
+    XLATE_EXT_TO_INT_MDC_CONFIG(mod_auto_size_ctl, mod_config)
+
+    /* Create a FAPL and verify that it contains the default
+     * initial mdc configuration
+     */
+
+    if ( pass2 ) {
+
+        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+        if ( fapl_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        /* scratch is an external (H5AC2) configuration, so it must be
+         * tagged with the cache config version -- not the internal
+         * auto size ctl version -- before it is passed to
+         * H5Pget_mdc_config().  (Was H5C2__CURR_AUTO_SIZE_CTL_VER;
+         * fixed for consistency with validate_mdc_config().)
+         */
+        scratch.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+        result = H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pget_mdc_config() failed.\n";
+
+        } else if (!CACHE_CONFIGS_EQUAL(default_config, scratch, TRUE, TRUE)) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "retrieved config doesn't match default.";
+        }
+    }
+
+
+    /* Modify the initial mdc configuration in a FAPL, and verify that
+     * the changes can be read back
+     */
+
+    if ( pass2 ) {
+
+        result = H5Pset_mdc_config(fapl_id, (H5AC_cache_config_t *)&mod_config);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pset_mdc_config() failed.\n";
+        }
+    }
+
+    if ( pass2 ) {
+
+        scratch.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+        result = H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pget_mdc_config() failed.\n";
+
+        } else if ( ! CACHE_CONFIGS_EQUAL(mod_config, scratch, TRUE, TRUE) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "retrieved config doesn't match mod config.";
+        }
+    }
+
+    if ( pass2 ) {
+
+        if ( H5Pclose(fapl_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pclose() failed.\n";
+        }
+    }
+
+    /* Open a file using the default FAPL.  Verify that the resulting
+     * metadata cache uses the default configuration as well.  Get a
+     * copy of the FAPL from the file, and verify that it contains the
+     * default initial meta data cache configuration.  Close and delete
+     * the file.
+     */
+
+    /* setup the file name */
+    if ( pass2 ) {
+
+        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, filename, sizeof(filename))
+             == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "h5_fixname() failed.\n";
+        }
+    }
+
+    /* create the file using the default FAPL */
+    if ( pass2 ) {
+
+        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+        if ( file_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fcreate() failed.\n";
+        }
+    }
+
+    /* get a pointer to the files internal data structure */
+    if ( pass2 ) {
+
+        file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+        if ( file_ptr == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't get file_ptr.\n";
+
+        } else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+#if 0 /* JRM */
+    /* since this is the test cache, must build the cache explicitly */
+    /* remove this when we start using the new cache */
+    if ( pass2 )
+    {
+        if ( H5AC2_create(file_ptr, &default_config) != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't construct test cache.\n";
+
+        } else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+#endif /* JRM */
+    /* verify that we can access the internal version of the cache config */
+    if ( pass2 ) {
+
+        if ( ( cache_ptr == NULL ) ||
+             ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+             ( cache_ptr->resize_ctl.version != H5C2__CURR_AUTO_SIZE_CTL_VER ) ){
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't access cache resize_ctl.\n";
+        }
+    }
+
+    /* compare the cache's internal configuration with the expected value */
+    if ( pass2 ) {
+
+        if ( ! RESIZE_CONFIGS_ARE_EQUAL(default_auto_size_ctl, \
+                                        cache_ptr->resize_ctl, TRUE) ) {
+
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected value(s) in cache resize_ctl 1.\n";
+        }
+    }
+
+    /* get a copy of the files FAPL */
+    if ( pass2 ) {
+
+        fapl_id = H5Fget_access_plist(file_id);
+
+        if ( fapl_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_access_plist() failed.\n";
+        }
+    }
+
+    /* compare the initial cache config from the copy of the file's FAPL
+     * to the expected value.  If all goes well, close the copy of the FAPL.
+     */
+    if ( pass2 ) {
+
+        scratch.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+        result = H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pget_mdc_config() failed.\n";
+
+        } else if (!CACHE_CONFIGS_EQUAL(default_config, scratch, TRUE, TRUE)) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "config retrieved from file doesn't match default.";
+
+        } else if ( H5Pclose(fapl_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pclose() failed.\n";
+        }
+    }
+
+    /* close the file and delete it */
+    if ( pass2 ) {
+
+        if ( H5Fclose(file_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fclose() failed.\n";
+
+        } else if ( HDremove(filename) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "HDremove() failed.\n";
+        }
+    }
+
+
+    /* Open a file using a FAPL with a modified initial metadata cache
+     * configuration.  Verify that the resulting metadata cache uses the
+     * modified configuration as well.  Get a copy of the FAPL from the
+     * file, and verify that it contains the modified initial meta data
+     * cache configuration.  Close and delete the file.
+     */
+
+    /* Create a FAPL */
+    if ( pass2 ) {
+
+        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+        if ( fapl_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+        }
+    }
+
+    /* Modify the initial mdc configuration in the FAPL. */
+
+    if ( pass2 ) {
+
+        result = H5Pset_mdc_config(fapl_id, (H5AC_cache_config_t *)&mod_config);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pset_mdc_config() failed.\n";
+        }
+    }
+
+    /* setup the file name */
+    if ( pass2 ) {
+
+        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, filename, sizeof(filename))
+             == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "h5_fixname() failed.\n";
+        }
+    }
+
+    /* create the file using the modified FAPL */
+    if ( pass2 ) {
+
+        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+        if ( file_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fcreate() failed.\n";
+        }
+    }
+
+    /* get a pointer to the files internal data structure */
+    if ( pass2 ) {
+
+        file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+        if ( file_ptr == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't get file_ptr.\n";
+
+        } else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+#if 0 /* JRM */
+    /* since this is the test cache, must build the cache explicitly */
+    /* remove this when we start using the new cache */
+    if ( pass2 )
+    {
+        if ( H5AC2_create(file_ptr, &default_config) != SUCCEED ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't construct test cache.\n";
+
+        } else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+#endif /* JRM */
+    /* verify that we can access the internal version of the cache config */
+    if ( pass2 ) {
+
+        if ( ( cache_ptr == NULL ) ||
+             ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+             ( cache_ptr->resize_ctl.version != H5C2__CURR_AUTO_SIZE_CTL_VER ) ){
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't access cache resize_ctl.\n";
+        }
+    }
+
+    /* compare the cache's internal configuration with the expected value */
+    if ( pass2 ) {
+
+        if ( ! RESIZE_CONFIGS_ARE_EQUAL(mod_auto_size_ctl, \
+                                        cache_ptr->resize_ctl, TRUE) ) {
+
+
+            pass2 = FALSE;
+            failure_mssg2 = "Unexpected value(s) in cache resize_ctl 2.\n";
+        }
+    }
+
+    /* get a copy of the files FAPL */
+    if ( pass2 ) {
+
+        test_fapl_id = H5Fget_access_plist(file_id);
+
+        if ( test_fapl_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_access_plist() failed.\n";
+        }
+    }
+
+    /* compare the initial cache config from the copy of the file's FAPL
+     * to the expected value.  If all goes well, close the copy of the FAPL.
+     */
+    if ( pass2 ) {
+
+        scratch.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+        result = H5Pget_mdc_config(test_fapl_id,
+                                   (H5AC_cache_config_t *)&scratch);
+
+        if ( result < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pget_mdc_config() failed.\n";
+
+        } else if ( ! CACHE_CONFIGS_EQUAL(mod_config, scratch, TRUE, TRUE) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "config retrieved from file doesn't match.";
+
+        } else if ( H5Pclose(test_fapl_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pclose() failed.\n";
+        }
+    }
+
+    /* close the fapl used to create the file */
+    if ( pass2 ) {
+
+        if ( H5Pclose(fapl_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Pclose() failed.\n";
+        }
+    }
+
+    /* close the file and delete it */
+    if ( pass2 ) {
+
+        if ( H5Fclose(file_id) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fclose() failed.\n";
+
+        } else if ( HDremove(filename) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "HDremove() failed.\n";
+        }
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 )
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+
+} /* check_fapl_mdc_api_calls() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	validate_mdc_config()
+ *
+ * Purpose:	Verify that the file indicated by the file_id parameter
+ *		has both internal and external configuration matching
+ *		*ext_config_ptr.
+ *
+ *		Do nothing on success.  On failure, set pass2 to FALSE,
+ *		and load an error message into failure_mssg2.  Error
+ *		messages are formatted into the static buffer msg, so
+ *		they remain valid after this function returns.
+ *
+ * Return:	void
+ *
+ * Programmer:	John Mainzer
+ *              4/14/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+validate_mdc_config(hid_t file_id,
+                    H5AC2_cache_config_t * ext_config_ptr,
+                    hbool_t compare_init,
+                    int test_num)
+{
+    /* const char * fcn_name = "validate_mdc_config()"; */
+    static char msg[256];
+    H5F_t * file_ptr = NULL;
+    H5C2_t * cache_ptr = NULL;
+    H5AC2_cache_config_t scratch;
+    H5C2_auto_size_ctl_t int_config;
+
+    /* translate the supplied external configuration into the internal
+     * representation for comparison with the cache's resize_ctl.
+     */
+    XLATE_EXT_TO_INT_MDC_CONFIG(int_config, (*ext_config_ptr))
+
+    /* get a pointer to the files internal data structure */
+    if ( pass2 ) {
+
+        file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+        if ( file_ptr == NULL ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, sizeof(msg), "Can't get file_ptr #%d.", test_num);
+            failure_mssg2 = msg;
+
+        } else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+
+    /* verify that we can access the internal version of the cache config */
+    if ( pass2 ) {
+
+        if ( ( cache_ptr == NULL ) ||
+             ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ||
+             ( cache_ptr->resize_ctl.version != H5C2__CURR_AUTO_SIZE_CTL_VER ) ){
+
+            pass2 = FALSE;
+            HDsnprintf(msg, sizeof(msg),
+                       "Can't access cache resize_ctl #%d.", test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* compare the cache's internal configuration with the expected value */
+    if ( pass2 ) {
+
+        if ( ! RESIZE_CONFIGS_ARE_EQUAL(int_config, cache_ptr->resize_ctl,
+                                        compare_init) ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, sizeof(msg),
+                       "Unexpected internal config #%d.", test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+    /* obtain external cache config */
+    if ( pass2 ) {
+
+        scratch.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+        if ( H5Fget_mdc_config(file_id, (H5AC_cache_config_t *)&scratch) < 0 ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, sizeof(msg),
+                       "H5Fget_mdc_config() failed #%d.", test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+    if ( pass2 ) {
+
+        /* Recall that in any configuration supplied by the cache
+         * at run time, the set_initial_size field will always
+         * be FALSE, regardless of the value passed in.  Thus we
+         * always assume that this field need not match that of
+         * the supplied external configuration.
+         *
+         * The cache also sets the initial_size field to the current
+         * cache max size instead of the value initially supplied.
+         * Depending on circumstances, this may or may not match
+         * the original.  Hence the compare_init parameter.
+         */
+        if ( ! CACHE_CONFIGS_EQUAL((*ext_config_ptr), scratch, \
+                                   FALSE, compare_init) ) {
+
+            pass2 = FALSE;
+            HDsnprintf(msg, sizeof(msg),
+                       "Unexpected external config #%d.", test_num);
+            failure_mssg2 = msg;
+        }
+    }
+
+    return;
+
+} /* validate_mdc_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	check_file_mdc_api_calls()
+ *
+ * Purpose:	Verify that the file related metadata cache API calls are
+ *		functioning correctly.
+ *
+ *		Since we have tested the H5C2 code elsewhere, it should
+ *		be sufficient to verify that the desired configuration
+ *		data is getting in and out of the cache.  Similarly,
+ *		we need only verify that the cache monitoring calls
+ *		return the data that the cache thinks they should return.
+ *		We shouldn't need to verify data correctness beyond that
+ *		point.
+ *
+ * Return:	void
+ *
+ * Programmer:	John Mainzer
+ *              4/14/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_file_mdc_api_calls(void)
+{
+    const char * fcn_name = "check_file_mdc_api_calls()";
+    char filename[512];
+    hid_t file_id = -1;
+    size_t max_size;
+    size_t min_clean_size;
+    size_t cur_size;
+    int cur_num_entries;
+    double hit_rate;
+    H5F_t * file_ptr = NULL;
+    H5AC2_cache_config_t default_config = H5AC2__DEFAULT_CACHE_CONFIG;
+    /* NOTE: these are external (H5AC2) configurations, so the version
+     * field must carry H5AC2__CURR_CACHE_CONFIG_VERSION -- was
+     * H5C2__CURR_AUTO_SIZE_CTL_VER, the *internal* version constant;
+     * fixed for consistency with mod_config in
+     * check_fapl_mdc_api_calls().
+     */
+    H5AC2_cache_config_t mod_config_1 =
+    {
+      /* int         version                = */ H5AC2__CURR_CACHE_CONFIG_VERSION,
+      /* hbool_t     rpt_fcn_enabled        = */ FALSE,
+      /* hbool_t     open_trace_file        = */ FALSE,
+      /* hbool_t     close_trace_file       = */ FALSE,
+      /* char        trace_file_name[]      = */ "",
+      /* hbool_t     evictions_enabled      = */ TRUE,
+      /* hbool_t     set_initial_size       = */ TRUE,
+      /* size_t      initial_size           = */ (1 * 1024 * 1024 + 1),
+      /* double      min_clean_fraction     = */ 0.2,
+      /* size_t      max_size               = */ (16 * 1024 * 1024 + 1),
+      /* size_t      min_size               = */ ( 1 * 1024 * 1024 + 1),
+      /* long int    epoch_length           = */ 50001,
+      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+      /* double      lower_hr_threshold     = */ 0.91,
+      /* double      increment              = */ 2.1,
+      /* hbool_t     apply_max_increment    = */ TRUE,
+      /* size_t      max_increment          = */ (4 * 1024 * 1024 + 1),
+      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out,
+      /* double      upper_hr_threshold     = */ 0.998,
+      /* double      decrement              = */ 0.91,
+      /* hbool_t     apply_max_decrement    = */ TRUE,
+      /* size_t      max_decrement          = */ (1 * 1024 * 1024 - 1),
+      /* int         epochs_before_eviction = */ 4,
+      /* hbool_t     apply_empty_reserve    = */ TRUE,
+      /* double      empty_reserve          = */ 0.05,
+      /* int         dirty_bytes_threshold  = */ (256 * 1024)
+    };
+    H5AC2_cache_config_t mod_config_2 =
+    {
+      /* int         version                = */ H5AC2__CURR_CACHE_CONFIG_VERSION,
+      /* hbool_t     rpt_fcn_enabled        = */ TRUE,
+      /* hbool_t     open_trace_file        = */ FALSE,
+      /* hbool_t     close_trace_file       = */ FALSE,
+      /* char        trace_file_name[]      = */ "",
+      /* hbool_t     evictions_enabled      = */ TRUE,
+      /* hbool_t     set_initial_size       = */ TRUE,
+      /* size_t      initial_size           = */ (512 * 1024),
+      /* double      min_clean_fraction     = */ 0.1,
+      /* size_t      max_size               = */ ( 8 * 1024 * 1024),
+      /* size_t      min_size               = */ (      512 * 1024),
+      /* long int    epoch_length           = */ 25000,
+      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+      /* double      lower_hr_threshold     = */ 0.9,
+      /* double      increment              = */ 2.0,
+      /* hbool_t     apply_max_increment    = */ TRUE,
+      /* size_t      max_increment          = */ (2 * 1024 * 1024),
+      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+      /* double      upper_hr_threshold     = */ 0.9995,
+      /* double      decrement              = */ 0.95,
+      /* hbool_t     apply_max_decrement    = */ TRUE,
+      /* size_t      max_decrement          = */ (512 * 1024),
+      /* int         epochs_before_eviction = */ 4,
+      /* hbool_t     apply_empty_reserve    = */ TRUE,
+      /* double      empty_reserve          = */ 0.05,
+      /* int         dirty_bytes_threshold  = */ (256 * 1024)
+    };
+    H5AC2_cache_config_t mod_config_3 =
+    {
+      /* int         version                = */ H5AC2__CURR_CACHE_CONFIG_VERSION,
+      /* hbool_t     rpt_fcn_enabled        = */ FALSE,
+      /* hbool_t     open_trace_file        = */ FALSE,
+      /* hbool_t     close_trace_file       = */ FALSE,
+      /* char        trace_file_name[]      = */ "",
+      /* hbool_t     evictions_enabled      = */ TRUE,
+      /* hbool_t     set_initial_size       = */ TRUE,
+      /* size_t      initial_size           = */ (1 * 1024 * 1024),
+      /* double      min_clean_fraction     = */ 0.2,
+      /* size_t      max_size               = */ (16 * 1024 * 1024),
+      /* size_t      min_size               = */ ( 1 * 1024 * 1024),
+      /* long int    epoch_length           = */ 50000,
+      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__off,
+      /* double      lower_hr_threshold     = */ 0.90,
+      /* double      increment              = */ 2.0,
+      /* hbool_t     apply_max_increment    = */ TRUE,
+      /* size_t      max_increment          = */ (4 * 1024 * 1024),
+      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
+      /* double      upper_hr_threshold     = */ 0.999,
+      /* double      decrement              = */ 0.9,
+      /* hbool_t     apply_max_decrement    = */ FALSE,
+      /* size_t      max_decrement          = */ (1 * 1024 * 1024 - 1),
+      /* int         epochs_before_eviction = */ 3,
+      /* hbool_t     apply_empty_reserve    = */ FALSE,
+      /* double      empty_reserve          = */ 0.05,
+      /* int         dirty_bytes_threshold  = */ (256 * 1024)
+    };
+    H5AC2_cache_config_t mod_config_4 =
+    {
+      /* int         version                = */ H5AC2__CURR_CACHE_CONFIG_VERSION,
+      /* hbool_t     rpt_fcn_enabled        = */ FALSE,
+      /* hbool_t     open_trace_file        = */ FALSE,
+      /* hbool_t     close_trace_file       = */ FALSE,
+      /* char        trace_file_name[]      = */ "",
+      /* hbool_t     evictions_enabled      = */ TRUE,
+      /* hbool_t     set_initial_size       = */ TRUE,
+      /* size_t      initial_size           = */ (1 * 1024 * 1024),
+      /* double      min_clean_fraction     = */ 0.15,
+      /* size_t      max_size               = */ (20 * 1024 * 1024),
+      /* size_t      min_size               = */ ( 1 * 1024 * 1024),
+      /* long int    epoch_length           = */ 75000,
+      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+      /* double      lower_hr_threshold     = */ 0.9,
+      /* double      increment              = */ 2.0,
+      /* hbool_t     apply_max_increment    = */ TRUE,
+      /* size_t      max_increment          = */ (2 * 1024 * 1024),
+      /* enum H5C2_cache_decr_mode decr_mode = */
+                                               H5C2_decr__age_out_with_threshold,
+      /* double      upper_hr_threshold     = */ 0.999,
+      /* double      decrement              = */ 0.9,
+      /* hbool_t     apply_max_decrement    = */ TRUE,
+      /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+      /* int         epochs_before_eviction = */ 3,
+      /* hbool_t     apply_empty_reserve    = */ TRUE,
+      /* double      empty_reserve          = */ 0.1,
+      /* int         dirty_bytes_threshold  = */ (256 * 1024)
+    };
+
+    TESTING("MDC/FILE related API calls");
+
+    pass2 = TRUE;
+
+    /* Open a file with the default FAPL.  Verify that the cache is
+     * configured as per the default both by looking at its internal
+     * configuration, and via the H5Fget_mdc_config() call.
+     *
+     * Then set several different configurations, and verify that
+     * they took as per above.
+     */
+
+    /* setup the file name */
+    if ( pass2 ) {
+
+        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, filename, sizeof(filename))
+             == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "h5_fixname() failed.\n";
+        }
+    }
+
+    /* create the file using the default FAPL */
+    if ( pass2 ) {
+
+        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+        if ( file_id < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fcreate() failed.\n";
+        }
+    }
+
+    /* verify that the cache is set to the default config */
+    validate_mdc_config(file_id, &default_config, TRUE, 1);
+
+    /* set alternate config 1 */
+    if ( pass2 ) {
+
+        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_1)
+             < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fset_mdc_config() failed 1.\n";
+        }
+    }
+
+    /* verify that the cache is now set to the alternate config */
+    validate_mdc_config(file_id, &mod_config_1, TRUE, 2);
+
+    /* set alternate config 2 */
+    if ( pass2 ) {
+
+        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_2)
+             < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fset_mdc_config() failed 2.\n";
+        }
+    }
+
+    /* verify that the cache is now set to the alternate config */
+    validate_mdc_config(file_id, &mod_config_2, TRUE, 3);
+
+    /* set alternate config 3 */
+    if ( pass2 ) {
+
+        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_3)
+             < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fset_mdc_config() failed 3.\n";
+        }
+    }
+
+    /* verify that the cache is now set to the alternate config */
+    validate_mdc_config(file_id, &mod_config_3, TRUE, 4);
+
+    /* set alternate config 4 */
+    if ( pass2 ) {
+
+        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_4)
+             < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fset_mdc_config() failed 4.\n";
+        }
+    }
+
+    /* verify that the cache is now set to the alternate config */
+    validate_mdc_config(file_id, &mod_config_4, TRUE, 5);
+
+
+    /* Run some quick smoke checks on the cache status monitoring
+     * calls -- no interesting data as the cache hasn't had a
+     * chance to do much yet.
+     */
+
+    if ( pass2 ) {
+
+        /* exact floating point compare is intentional here -- with no
+         * accesses yet, the reported hit rate should be exactly zero.
+         */
+        if ( H5Fget_mdc_hit_rate(file_id, &hit_rate) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_mdc_hit_rate() failed 1.\n";
+
+        } else if ( hit_rate != 0.0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 =
+                "H5Fget_mdc_hit_rate() returned unexpected hit rate.\n";
+
+        }
+#if 0 /* this may be useful now and then -- keep it around */
+        else {
+
+            HDfprintf(stdout,
+                      "H5Fget_mdc_hit_rate() reports hit_rate = %lf:\n",
+                      hit_rate);
+        }
+#endif
+    }
+
+    if ( pass2 ) {
+
+        if ( H5Fget_mdc_size(file_id, &max_size, &min_clean_size,
+                             &cur_size, &cur_num_entries) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_mdc_size() failed 1.\n";
+
+        } else if ( ( mod_config_4.initial_size != max_size ) ||
+                    ( min_clean_size != (size_t)
+                      ((double)max_size * mod_config_4.min_clean_fraction) ) ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_mdc_size() returned unexpected value(s).\n";
+
+        }
+#if 0 /* this may be useful now and then -- keep it around */
+        else {
+
+            HDfprintf(stdout, "H5Fget_mdc_size() reports:\n");
+            HDfprintf(stdout, "	max_size: %ld, min_clean_size: %ld\n",
+                      (long)max_size, (long)min_clean_size);
+	    HDfprintf(stdout, "	cur_size: %ld, cur_num_entries: %d\n",
+                      (long)cur_size, cur_num_entries);
+        }
+#endif
+    }
+
+    /* close the file and delete it */
+    if ( pass2 ) {
+
+        if ( H5Fclose(file_id) < 0  ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fclose() failed.\n";
+
+        } else if ( HDremove(filename) < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "HDremove() failed.\n";
+        }
+    }
+
+    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass2 )
+        HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+                  fcn_name, failure_mssg2);
+
+} /* check_file_mdc_api_calls() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	check_and_validate_cache_hit_rate()
+ *
+ * Purpose:	Fetch the cache hit rate via H5Fget_mdc_hit_rate() and
+ *		verify that it equals the rate implied by the cache's
+ *		internal hit and access counters.  Then reset the hit
+ *		rate statistics via H5Freset_mdc_hit_rate_stats().
+ *
+ *		If the number of cache accesses exceeds min_accesses and
+ *		the hit rate is below min_hit_rate, set pass2 to FALSE
+ *		and set failure_mssg2 to a string indicating that the
+ *		hit rate was unexpectedly low.
+ *
+ *		Return the hit rate in *hit_rate_ptr (when non-NULL),
+ *		and print the raw numbers to stdout when dump_data is
+ *		TRUE.
+ *
+ *		On any error, set pass2 to FALSE and set failure_mssg2
+ *		to an appropriate value.
+ *
+ * Return:	void
+ *
+ * Programmer:	John Mainzer
+ *              4/18/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_and_validate_cache_hit_rate(hid_t file_id,
+                                  double * hit_rate_ptr,
+                                  hbool_t dump_data,
+                                  int64_t min_accesses,
+                                  double min_hit_rate)
+{
+    /* const char * fcn_name = "check_and_validate_cache_hit_rate()"; */
+    herr_t status;
+    int64_t hits = 0;
+    int64_t accesses = 0;
+    double expected_rate;
+    double rate;
+    H5F_t * file_ptr = NULL;
+    H5C2_t * cache_ptr = NULL;
+
+    /* resolve the file id into the library's internal file structure,
+     * and from there obtain the cache pointer.
+     */
+    if ( pass2 ) {
+
+        file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+        if ( file_ptr == NULL ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "Can't get file_ptr.";
+        }
+        else {
+
+            cache_ptr = file_ptr->shared->cache2;
+        }
+    }
+
+    /* sanity check the cache pointer via its magic number */
+    if ( ( pass2 ) &&
+         ( ( cache_ptr == NULL ) ||
+           ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Can't access cache resize_ctl.";
+    }
+
+    /* read the raw counters, compute the rate the cache should report,
+     * and compare against the value returned by the public API.
+     */
+    if ( pass2 ) {
+
+        hits = cache_ptr->cache_hits;
+        accesses = cache_ptr->cache_accesses;
+
+        expected_rate = ( accesses > 0 )
+                        ? ( ((double)hits) / ((double)accesses) )
+                        : 0.0;
+
+        status = H5Fget_mdc_hit_rate(file_id, &rate);
+
+        if ( status < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Fget_mdc_hit_rate() failed.";
+        }
+        else if ( rate != expected_rate ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "unexpected hit rate.";
+        }
+    }
+
+    if ( pass2 ) { /* reset the hit rate */
+
+        status = H5Freset_mdc_hit_rate_stats(file_id);
+
+        if ( status < 0 ) {
+
+            pass2 = FALSE;
+            failure_mssg2 = "H5Freset_mdc_hit_rate_stats() failed.";
+        }
+    }
+
+    /* report the hit rate back to the caller if requested */
+    if ( ( pass2 ) && ( hit_rate_ptr != NULL ) ) {
+
+        *hit_rate_ptr = rate;
+    }
+
+    /* optionally dump the raw numbers to stdout */
+    if ( ( pass2 ) && ( dump_data ) ) {
+
+        HDfprintf(stdout,
+                  "cache_hits: %ld, cache_accesses: %ld, hit_rate: %lf\n",
+                  (long)hits, (long)accesses, rate);
+    }
+
+    /* finally, flag an error if the hit rate is implausibly low given
+     * the number of accesses observed.
+     */
+    if ( ( pass2 ) &&
+         ( accesses > min_accesses ) &&
+         ( rate < min_hit_rate ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "Unexpectedly low hit rate.";
+    }
+
+    return;
+
+} /* check_and_validate_cache_hit_rate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_and_validate_cache_size()
+ *
+ * Purpose: Use the API function to get the cache size data. Verify
+ * that the values returned by the API call agree with
+ * the cache internal data structures.
+ *
+ * Return size data in the locations specified by the pointer
+ * parameters if these parameters are not NULL. Print the
+ * data to stdout if requested.
+ *
+ * If an error is detected, set pass2 to FALSE, and set
+ * failure_mssg2 to an appropriate value.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/18/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_and_validate_cache_size(hid_t file_id,
+ size_t * max_size_ptr,
+ size_t * min_clean_size_ptr,
+ size_t * cur_size_ptr,
+ int32_t * cur_num_entries_ptr,
+ hbool_t dump_data)
+{
+ /* const char * fcn_name = "check_and_validate_cache_size()"; */
+ herr_t result;
+ size_t expected_max_size;
+ size_t max_size;
+ size_t expected_min_clean_size;
+ size_t min_clean_size;
+ size_t expected_cur_size;
+ size_t cur_size;
+ int32_t expected_cur_num_entries;
+ int cur_num_entries;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+
+ /* get a pointer to the files internal data structure */
+ if ( pass2 ) {
+
+ file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Can't get file_ptr.";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache2;
+ }
+ }
+
+ /* verify that we can access the cache data structure */
+ if ( pass2 ) {
+
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Can't access cache data structure.";
+ }
+ }
+
+ /* compare the cache's internal configuration with the expected value */
+ if ( pass2 ) {
+
+ expected_max_size = cache_ptr->max_cache_size;
+ expected_min_clean_size = cache_ptr->min_clean_size;
+ expected_cur_size = cache_ptr->index_size;
+ expected_cur_num_entries = cache_ptr->index_len;
+
+ result = H5Fget_mdc_size(file_id,
+ &max_size,
+ &min_clean_size,
+ &cur_size,
+ &cur_num_entries);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_size() failed.";
+
+ } else if ( ( max_size != expected_max_size ) ||
+ ( min_clean_size != expected_min_clean_size ) ||
+ ( cur_size != expected_cur_size ) ||
+ ( cur_num_entries != (int)expected_cur_num_entries ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_size() returned unexpected value(s).";
+
+ }
+ }
+
+ /* return size values if requested */
+ if ( ( pass2 ) && ( max_size_ptr != NULL ) ) {
+
+ *max_size_ptr = max_size;
+ }
+
+ if ( ( pass2 ) && ( min_clean_size_ptr != NULL ) ) {
+
+ *min_clean_size_ptr = min_clean_size;
+ }
+
+ if ( ( pass2 ) && ( cur_size_ptr != NULL ) ) {
+
+ *cur_size_ptr = cur_size;
+ }
+
+ if ( ( pass2 ) && ( cur_num_entries_ptr != NULL ) ) {
+
+ *cur_num_entries_ptr = cur_num_entries;
+ }
+
+
+ /* dump data to stdout if requested */
+ if ( ( pass2 ) && ( dump_data ) ) {
+
+ HDfprintf(stdout,
+ "max_sz: %ld, min_clean_sz: %ld, cur_sz: %ld, cur_ent: %ld\n",
+ (long)max_size, (long)min_clean_size, (long)cur_size,
+ (long)cur_num_entries);
+ }
+
+ return;
+
+} /* check_and_validate_cache_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: mdc_api_call_smoke_check()
+ *
+ * Purpose:	Exercise the metadata cache API calls (H5Fset_mdc_config(),
+ *		H5Fget_mdc_hit_rate(), H5Fget_mdc_size(), etc.) against a
+ *		file of chunked datasets, switching among several fixed-size
+ *		cache configurations and validating hit rate and size data
+ *		along the way.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/14/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/12/06
+ * Added progress reporting code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHUNK_SIZE 2
+#define DSET_SIZE (200 * CHUNK_SIZE)
+#define NUM_DSETS 6
+#define NUM_RANDOM_ACCESSES 200000
+
+/* can't run this test until we start using H5C2 -- thus comment it
+ * out for now.
+ * JRM -- 10/19/07
+ */
+#if 0
+
/* NOTE: this entire function is compiled out by the surrounding
 * #if 0 / #endif pair -- per the comment above, it can't run until the
 * library is switched over to H5C2.  The code is kept byte-for-byte;
 * only explanatory comments have been added.
 */
static void
mdc_api_call_smoke_check(void)
{
    const char * fcn_name = "mdc_api_call_smoke_check()";
    char filename[512];
    hbool_t valid_chunk;
    hbool_t report_progress = FALSE;
    hbool_t dump_hit_rate = FALSE;
    int64_t min_accesses = 1000;
    double min_hit_rate = 0.90;
    hbool_t dump_cache_size = FALSE;
    hid_t file_id = -1;
    hid_t dataspace_id = -1;
    hid_t filespace_ids[NUM_DSETS];
    hid_t memspace_id = -1;
    hid_t dataset_ids[NUM_DSETS];
    hid_t properties;
    char dset_name[64];
    int i, j, k, l, m, n;
    int progress_counter;
    herr_t status;
    hsize_t dims[2];
    hsize_t a_size[2];
    hsize_t offset[2];
    hsize_t chunk_size[2];
    int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
    H5AC2_cache_config_t default_config = H5AC2__DEFAULT_CACHE_CONFIG;
    /* mod_config_1: fixed 500 KB cache, automatic resize disabled */
    H5AC2_cache_config_t mod_config_1 =
    {
      /* int version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
      /* hbool_t rpt_fcn_enabled    = */ FALSE,
      /* hbool_t open_trace_file    = */ FALSE,
      /* hbool_t close_trace_file   = */ FALSE,
      /* char trace_file_name[]     = */ "",
      /* hbool_t evictions_enabled  = */ TRUE,
      /* hbool_t set_initial_size   = */ TRUE,
      /* size_t initial_size        = */ 500000,
      /* double min_clean_fraction  = */ 0.1,
      /* size_t max_size            = */ 16000000,
      /* size_t min_size            = */ 250000,
      /* long int epoch_length      = */ 50000,
      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__off,
      /* double lower_hr_threshold  = */ 0.95,
      /* double increment           = */ 2.0,
      /* hbool_t apply_max_increment = */ FALSE,
      /* size_t max_increment       = */ 4000000,
      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
      /* double upper_hr_threshold  = */ 0.999,
      /* double decrement           = */ 0.9,
      /* hbool_t apply_max_decrement = */ FALSE,
      /* size_t max_decrement       = */ 1000000,
      /* int epochs_before_eviction = */ 2,
      /* hbool_t apply_empty_reserve = */ TRUE,
      /* double empty_reserve       = */ 0.05,
      /* int dirty_bytes_threshold  = */ (256 * 1024)
    };
    /* mod_config_2: as mod_config_1 but with a fixed 12 MB cache */
    H5AC2_cache_config_t mod_config_2 =
    {
      /* int version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
      /* hbool_t rpt_fcn_enabled    = */ FALSE,
      /* hbool_t open_trace_file    = */ FALSE,
      /* hbool_t close_trace_file   = */ FALSE,
      /* char trace_file_name[]     = */ "",
      /* hbool_t evictions_enabled  = */ TRUE,
      /* hbool_t set_initial_size   = */ TRUE,
      /* size_t initial_size        = */ 12000000,
      /* double min_clean_fraction  = */ 0.1,
      /* size_t max_size            = */ 16000000,
      /* size_t min_size            = */ 250000,
      /* long int epoch_length      = */ 50000,
      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__off,
      /* double lower_hr_threshold  = */ 0.95,
      /* double increment           = */ 2.0,
      /* hbool_t apply_max_increment = */ FALSE,
      /* size_t max_increment       = */ 4000000,
      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
      /* double upper_hr_threshold  = */ 0.999,
      /* double decrement           = */ 0.9,
      /* hbool_t apply_max_decrement = */ FALSE,
      /* size_t max_decrement       = */ 1000000,
      /* int epochs_before_eviction = */ 2,
      /* hbool_t apply_empty_reserve = */ TRUE,
      /* double empty_reserve       = */ 0.05,
      /* int dirty_bytes_threshold  = */ (256 * 1024)
    };
    /* mod_config_3: as mod_config_1 but with a fixed 2 MB cache */
    H5AC2_cache_config_t mod_config_3 =
    {
      /* int version                = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
      /* hbool_t rpt_fcn_enabled    = */ FALSE,
      /* hbool_t open_trace_file    = */ FALSE,
      /* hbool_t close_trace_file   = */ FALSE,
      /* char trace_file_name[]     = */ "",
      /* hbool_t evictions_enabled  = */ TRUE,
      /* hbool_t set_initial_size   = */ TRUE,
      /* size_t initial_size        = */ 2000000,
      /* double min_clean_fraction  = */ 0.1,
      /* size_t max_size            = */ 16000000,
      /* size_t min_size            = */ 250000,
      /* long int epoch_length      = */ 50000,
      /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__off,
      /* double lower_hr_threshold  = */ 0.95,
      /* double increment           = */ 2.0,
      /* hbool_t apply_max_increment = */ FALSE,
      /* size_t max_increment       = */ 4000000,
      /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
      /* double upper_hr_threshold  = */ 0.999,
      /* double decrement           = */ 0.9,
      /* hbool_t apply_max_decrement = */ FALSE,
      /* size_t max_decrement       = */ 1000000,
      /* int epochs_before_eviction = */ 2,
      /* hbool_t apply_empty_reserve = */ TRUE,
      /* double empty_reserve       = */ 0.05,
      /* int dirty_bytes_threshold  = */ (256 * 1024)
    };

    TESTING("MDC API smoke check");

    pass2 = TRUE;

    /* Open a file with the default FAPL.  Verify that the cache is
     * configured as per the default both by looking at its internal
     * configuration, and via the H5Fget_mdc_config() call.
     *
     * Then set the cache to mod_config_1, which fixes cache size at
     * 500000 bytes, and turns off automatic cache resize.
     */

    /* setup the file name */

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"\nSetting up file ... ");
        HDfflush(stdout);
    }

    if ( pass2 ) {

        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, filename, sizeof(filename))
            == NULL ) {

            pass2 = FALSE;
            failure_mssg2 = "h5_fixname() failed.\n";
        }
    }

    /* create the file using the default FAPL */
    if ( pass2 ) {

        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

        if ( file_id < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Fcreate() failed.\n";
        }
    }

    /* verify that the cache is set to the default config */
    validate_mdc_config(file_id, &default_config, TRUE, 1);

    /* set alternate config 1 */
    if ( pass2 ) {

        /* NOTE(review): mod_config_1 is an H5AC2_cache_config_t cast to
         * H5AC_cache_config_t for H5Fset_mdc_config() -- confirm the two
         * struct layouts match (same concern at the other two calls below).
         */
        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_1)
             < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Fset_mdc_config() failed 1.\n";
        }
    }

    /* verify that the cache is now set to the alternate config */
    validate_mdc_config(file_id, &mod_config_1, TRUE, 2);

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"Done.\n"); /* setting up file */
        HDfflush(stdout);
    }


    /* create the datasets */

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"Creating datasets ... ");
        HDfflush(stdout);
    }

    if ( pass2 ) {

        i = 0;

        while ( ( pass2 ) && ( i < NUM_DSETS ) )
        {
            /* create a dataspace for the chunked dataset */
            dims[0] = DSET_SIZE;
            dims[1] = DSET_SIZE;
            dataspace_id = H5Screate_simple(2, dims, NULL);

            if ( dataspace_id < 0 ) {

                pass2 = FALSE;
                failure_mssg2 = "H5Screate_simple() failed.";
            }

            /* set the dataset creation plist to specify that the raw data is
             * to be partioned into 10X10 element chunks.
             */

            if ( pass2 ) {

                chunk_size[0] = CHUNK_SIZE;
                chunk_size[1] = CHUNK_SIZE;
                properties = H5Pcreate(H5P_DATASET_CREATE);

                if ( properties < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "H5Pcreate() failed.";
                }
            }

            if ( pass2 ) {

                if ( H5Pset_chunk(properties, 2, chunk_size) < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "H5Pset_chunk() failed.";
                }
            }

            /* create the dataset */
            if ( pass2 ) {

                sprintf(dset_name, "/dset%03d", i);
                dataset_ids[i] = H5Dcreate(file_id, dset_name, H5T_STD_I32BE,
                                           dataspace_id, properties);

                if ( dataset_ids[i] < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "H5Dcreate() failed.";
                }
            }

            /* get the file space ID */
            if ( pass2 ) {

                filespace_ids[i] = H5Dget_space(dataset_ids[i]);

                if ( filespace_ids[i] < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "H5Dget_space() failed.";
                }
            }

            i++;
        }
    }

    /* create the mem space to be used to read and write chunks */
    if ( pass2 ) {

        dims[0] = CHUNK_SIZE;
        dims[1] = CHUNK_SIZE;
        memspace_id = H5Screate_simple(2, dims, NULL);

        if ( memspace_id < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Screate_simple() failed.";
        }
    }

    /* select in memory hyperslab */
    if ( pass2 ) {

        offset[0] = 0;  /*offset of hyperslab in memory*/
        offset[1] = 0;
        a_size[0] = CHUNK_SIZE;  /*size of hyperslab*/
        a_size[1] = CHUNK_SIZE;
        status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
                                     a_size, NULL);

        if ( status < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Sselect_hyperslab() failed.";
        }
    }

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"Done.\n");
        HDfflush(stdout);
    }

    /* initialize all datasets on a round robin basis -- one chunk is
     * written to each dataset in turn before advancing to the next
     * chunk position, so all NUM_DSETS datasets grow together.
     */
    i = 0;
    progress_counter = 0;

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout, "Initializing datasets ");
        HDfflush(stdout);
    }

    while ( ( pass2 ) && ( i < DSET_SIZE ) )
    {
        j = 0;
        while ( ( pass2 ) && ( j < DSET_SIZE ) )
        {
            m = 0;
            while ( ( pass2 ) && ( m < NUM_DSETS ) )
            {
                /* initialize the slab -- each element's value encodes its
                 * dataset index and (row, column) position, so it can be
                 * verified independently on read back.
                 */
                for ( k = 0; k < CHUNK_SIZE; k++ )
                {
                    for ( l = 0; l < CHUNK_SIZE; l++ )
                    {
                        data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) +
                                           (DSET_SIZE * (i + k)) + j + l;
                    }
                }

                /* select on disk hyperslab */
                offset[0] = i; /*offset of hyperslab in file*/
                offset[1] = j;
                a_size[0] = CHUNK_SIZE;   /*size of hyperslab*/
                a_size[1] = CHUNK_SIZE;
                status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
                                             offset, NULL, a_size, NULL);

                if ( status < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "disk H5Sselect_hyperslab() failed.";
                }

                /* write the chunk to file */
                status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
                                  filespace_ids[m], H5P_DEFAULT, data_chunk);

                if ( status < 0 ) {

                    pass2 = FALSE;
                    failure_mssg2 = "H5Dwrite() failed.";
                }
                m++;
            }
            j += CHUNK_SIZE;
        }

        /* check the cache hit rate, and reset the counters.
         * Hit rate should be just about unity here, so we will just
         * get the data and (possibly) print it without checking it
         * beyond ensuring that it agrees with the cache internal
         * data structures.
         *
         * similarly, check cache size.
         */

        if ( ( pass2 ) && ( i % (DSET_SIZE / 4) == 0 ) ) {

            check_and_validate_cache_hit_rate(file_id, NULL, dump_hit_rate,
                                              min_accesses, min_hit_rate);

            check_and_validate_cache_size(file_id, NULL, NULL, NULL, NULL,
                                          dump_cache_size);
        }

        i += CHUNK_SIZE;

        if ( ( pass2 ) && ( report_progress ) ) {

	    progress_counter += CHUNK_SIZE;

	    if ( progress_counter >= DSET_SIZE / 20 ) {

	        progress_counter = 0;
	        HDfprintf(stdout, ".");
                HDfflush(stdout);
            }
        }
    }

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout," Done.\n"); /* initializing data sets */
        HDfflush(stdout);
    }

    /* set alternate config 2 */
    if ( pass2 ) {

        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_2)
             < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Fset_mdc_config() failed 2.\n";
        }
    }

    /* verify that the cache is now set to the alternate config */
    validate_mdc_config(file_id, &mod_config_2, TRUE, 3);

    /* do random reads on all datasets */

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout, "Doing random reads on all datasets ");
        HDfflush(stdout);
    }

    n = 0;
    progress_counter = 0;
    while ( ( pass2 ) && ( n < NUM_RANDOM_ACCESSES ) )
    {
        /* pick a random dataset and a random chunk-aligned position */
        m = rand() % NUM_DSETS;
        i = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE;
        j = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE;

        /* select on disk hyperslab */
        offset[0] = i; /*offset of hyperslab in file*/
        offset[1] = j;
        a_size[0] = CHUNK_SIZE;   /*size of hyperslab*/
        a_size[1] = CHUNK_SIZE;
        status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
                                     offset, NULL, a_size, NULL);

        if ( status < 0 ) {

           pass2 = FALSE;
           failure_mssg2 = "disk hyperslab create failed.";
        }

        /* read the chunk from file */
        if ( pass2 ) {

            status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
                             filespace_ids[m], H5P_DEFAULT, data_chunk);

            if ( status < 0 ) {

               pass2 = FALSE;
               failure_mssg2 = "disk hyperslab create failed.";
            }
        }

        /* validate the slab -- each element must match the value written
         * during initialization above.
         */
        if ( pass2 ) {

            valid_chunk = TRUE;
            for ( k = 0; k < CHUNK_SIZE; k++ )
            {
                for ( l = 0; l < CHUNK_SIZE; l++ )
                {
                     if ( data_chunk[k][l]
                          !=
                          ((DSET_SIZE * DSET_SIZE * m) +
                           (DSET_SIZE * (i + k)) + j + l) ) {

                         valid_chunk = FALSE;
#if 0 /* this will be useful from time to time -- lets keep it*/
                         HDfprintf(stdout,
                                   "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
                                   k, l, data_chunk[k][l],
                                   ((DSET_SIZE * DSET_SIZE * m) +
                                    (DSET_SIZE * (i + k)) + j + l));
                         HDfprintf(stdout,
                                   "m = %d, i = %d, j = %d, k = %d, l = %d\n",
                                   m, i, j, k, l);
#endif
                    }
                }
            }

            if ( ! valid_chunk ) {
#if 1
                pass2 = FALSE;
                failure_mssg2 = "slab validation failed.";
#else /* as above */
                fprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
                        i, j, m);
#endif
            }
        }

        if ( ( pass2 ) && ( n % (NUM_RANDOM_ACCESSES / 4) == 0 ) ) {

            check_and_validate_cache_hit_rate(file_id, NULL, dump_hit_rate,
                                              min_accesses, min_hit_rate);

            check_and_validate_cache_size(file_id, NULL, NULL, NULL, NULL,
                                          dump_cache_size);
        }

        n++;

        if ( ( pass2 ) && ( report_progress ) ) {

	    progress_counter++;

	    if ( progress_counter >= NUM_RANDOM_ACCESSES / 20 ) {

	        progress_counter = 0;
	        HDfprintf(stdout, ".");
                HDfflush(stdout);
            }
        }
    }

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout, " Done.\n"); /* random reads on all data sets */
        HDfflush(stdout);
    }


    /* close the file spaces we are done with -- index 0 is deliberately
     * kept open, as dataset 0 is still needed for the random reads below.
     */
    i = 1;
    while ( ( pass2 ) && ( i < NUM_DSETS ) )
    {
        if ( H5Sclose(filespace_ids[i]) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Sclose() failed.";
        }
        i++;
    }


    /* close the datasets we are done with -- again, index 0 stays open */
    i = 1;
    while ( ( pass2 ) && ( i < NUM_DSETS ) )
    {
        if ( H5Dclose(dataset_ids[i]) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Dclose() failed.";
        }
        i++;
    }

    /* set alternate config 3 */
    if ( pass2 ) {

        if ( H5Fset_mdc_config(file_id, (H5AC_cache_config_t *)&mod_config_3)
             < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Fset_mdc_config() failed 3.\n";
        }
    }

    /* verify that the cache is now set to the alternate config */
    validate_mdc_config(file_id, &mod_config_3, TRUE, 4);

    /* do random reads on data set 0 only */

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout, "Doing random reads on dataset 0 ");
        HDfflush(stdout);
    }

    m = 0;
    n = 0;
    progress_counter = 0;
    while ( ( pass2 ) && ( n < NUM_RANDOM_ACCESSES ) )
    {
        i = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE;
        j = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE;

        /* select on disk hyperslab */
        offset[0] = i; /*offset of hyperslab in file*/
        offset[1] = j;
        a_size[0] = CHUNK_SIZE;   /*size of hyperslab*/
        a_size[1] = CHUNK_SIZE;
        status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
                                     offset, NULL, a_size, NULL);

        if ( status < 0 ) {

           pass2 = FALSE;
           failure_mssg2 = "disk hyperslab create failed.";
        }

        /* read the chunk from file */
        if ( pass2 ) {

            status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
                             filespace_ids[m], H5P_DEFAULT, data_chunk);

            if ( status < 0 ) {

               pass2 = FALSE;
               failure_mssg2 = "disk hyperslab create failed.";
            }
        }

        /* validate the slab */
        if ( pass2 ) {

            valid_chunk = TRUE;
            for ( k = 0; k < CHUNK_SIZE; k++ )
            {
               for ( l = 0; l < CHUNK_SIZE; l++ )
               {
                    if ( data_chunk[k][l]
                         !=
                         ((DSET_SIZE * DSET_SIZE * m) +
                          (DSET_SIZE * (i + k)) + j + l) ) {

                        valid_chunk = FALSE;
                    }
                    /* NOTE(review): unlike the first validation loop above,
                     * this #if 0 debug block sits OUTSIDE the mismatch if --
                     * if ever re-enabled, it would print for every element,
                     * not just failures.  Likely a copy/paste slip; confirm
                     * intent before enabling.
                     */
#if 0 /* this will be useful from time to time -- lets keep it */
                    HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
                              k, l, data_chunk[k][l],
                              ((DSET_SIZE * DSET_SIZE * m) +
                               (DSET_SIZE * (i + k)) + j + l));
#endif
                }
            }

            if ( ! valid_chunk ) {

                pass2 = FALSE;
                failure_mssg2 = "slab validation failed.";
#if 0 /* as above */
                fprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
                        i, j, m);
#endif
            }
        }

        if ( ( pass2 ) && ( n % (NUM_RANDOM_ACCESSES / 4) == 0 ) ) {

            check_and_validate_cache_hit_rate(file_id, NULL, dump_hit_rate,
                                              min_accesses, min_hit_rate);

            check_and_validate_cache_size(file_id, NULL, NULL, NULL, NULL,
                                          dump_cache_size);
        }

        n++;

        if ( ( pass2 ) && ( report_progress ) ) {

	    progress_counter++;

	    if ( progress_counter >= NUM_RANDOM_ACCESSES / 20 ) {

	        progress_counter = 0;
	        HDfprintf(stdout, ".");
                HDfflush(stdout);
            }
        }
    }

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout, " Done.\n"); /* random reads data set 0 */
        HDfflush(stdout);
    }


    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"Shutting down ... ");
        HDfflush(stdout);
    }


    /* close file space 0 */
    if ( pass2 ) {

        if ( H5Sclose(filespace_ids[0]) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Sclose(filespace_ids[0]) failed.";
        }
    }

    /* close the data space */
    if ( pass2 ) {

        if ( H5Sclose(dataspace_id) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Sclose(dataspace) failed.";
        }
    }

    /* close the mem space */
    if ( pass2 ) {

        if ( H5Sclose(memspace_id) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Sclose(memspace_id) failed.";
        }
    }

    /* close dataset 0 */
    if ( pass2 ) {

        if ( H5Dclose(dataset_ids[0]) < 0 ) {

            pass2 = FALSE;
            failure_mssg2 = "H5Dclose(dataset_ids[0]) failed.";
        }
    }

    /* close the file and delete it */
    if ( pass2 ) {

	if ( H5Fclose(file_id) < 0 ) {

            pass2 = FALSE;
	    failure_mssg2 = "H5Fclose() failed.\n";

        }
        else if ( HDremove(filename) < 0 ) {

            pass2 = FALSE;
	    failure_mssg2 = "HDremove() failed.\n";
        }
    }

    if ( ( pass2 ) && ( report_progress ) ) {

	HDfprintf(stdout,"Done.\n"); /* shutting down */
        HDfflush(stdout);
    }


    if ( pass2 ) { PASSED(); } else { H5_FAILED(); }

    if ( ! pass2 )
	HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
                  fcn_name, failure_mssg2);

} /* mdc_api_call_smoke_check() */
+
+#endif /* commented out for now */
+
+
+/* The following array of invalid external MDC cache configurations is
+ * used to test error rejection in the MDC related API calls.
+ */
+
+#define NUM_INVALID_CONFIGS 36
+
+H5AC2_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] =
+{
+ {
+ /* 0 -- bad version */
+ /* int version = */ -1,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 1 -- bad rpt_fcn_enabled */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ (hbool_t)-1,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 2 -- bad open_trace_file */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ (hbool_t)-1,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 3 -- bad close_trace_file */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ (hbool_t)-1,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 4 -- open_trace_file == TRUE and empty trace_file_name */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ TRUE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 5 -- bad set_initial_size */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ 2,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 6 -- max_size too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ H5C2__MAX_MAX_CACHE_SIZE + 1,
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 7 -- min_size too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ H5C2__MIN_MAX_CACHE_SIZE - 1,
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 8 -- min_size > max_size */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ FALSE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ (16 * 1024 * 1024 + 1),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 9 -- initial size out of range (too big) */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (16 * 1024 * 1024 + 1),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 10 -- initial_size out of range (too small) */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024 - 1),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 11 -- min_clean_fraction too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 1.000001,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 12 -- min_clean_fraction too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ -0.00000001,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 13 -- epoch_length too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ H5C2__MIN_AR_EPOCH_LENGTH - 1,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 14 -- epoch_length too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ H5C2__MAX_AR_EPOCH_LENGTH + 1,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 15 -- invalid incr_mode */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ -1,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 16 -- lower_hr_threshold too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ -0.000001,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 17 -- lower_hr_threshold too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 1.00000001,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 18 -- increment too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 0.999999999999,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 19 -- bad apply_max_increment */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ (hbool_t)-1,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 20 -- bad decr_mode */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ -1,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 21 -- upper_hr_threshold too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+ /* double upper_hr_threshold = */ 1.00001,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 22 -- decrement too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ -0.0000000001,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 23 -- decrement too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 1.0000000001,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 24 -- epochs_before_eviction too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 0,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+        /* 25 -- epochs_before_eviction too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ H5C2__MAX_EPOCH_MARKERS + 1,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 26 -- invalid apply_empty_reserve */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ 2,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 27 -- empty_reserve too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ -0.0000000001,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 28 -- empty_reserve too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 1.00000000001,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 29 -- upper_hr_threshold too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ -0.000000001,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 30 -- upper_hr_threshold too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 1.00000001,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 31 -- upper_hr_threshold <= lower_hr_threshold */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.9,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 32 -- dirty_bytes_threshold too small */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.999,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (H5C2__MIN_MAX_CACHE_SIZE / 2) - 1
+ },
+ {
+ /* 33 -- dirty_bytes_threshold too big */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ TRUE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out_with_threshold,
+ /* double upper_hr_threshold = */ 0.9,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (H5C2__MAX_MAX_CACHE_SIZE / 4) + 1
+ },
+ {
+ /* 34 -- attempt to disable evictions when auto incr enabled */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ FALSE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__off,
+ /* double upper_hr_threshold = */ 0.9,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ },
+ {
+ /* 35 -- attempt to disable evictions when auto decr enabled */
+ /* int version = */ H5C2__CURR_AUTO_SIZE_CTL_VER,
+ /* hbool_t rpt_fcn_enabled = */ FALSE,
+ /* hbool_t open_trace_file = */ FALSE,
+ /* hbool_t close_trace_file = */ FALSE,
+ /* char trace_file_name[] = */ "",
+ /* hbool_t evictions_enabled = */ FALSE,
+ /* hbool_t set_initial_size = */ TRUE,
+ /* size_t initial_size = */ (1 * 1024 * 1024),
+ /* double min_clean_fraction = */ 0.25,
+ /* size_t max_size = */ (16 * 1024 * 1024),
+ /* size_t min_size = */ ( 1 * 1024 * 1024),
+ /* long int epoch_length = */ 50000,
+ /* enum H5C2_cache_incr_mode incr_mode = */ H5C2_incr__threshold,
+ /* double lower_hr_threshold = */ 0.9,
+ /* double increment = */ 2.0,
+ /* hbool_t apply_max_increment = */ TRUE,
+ /* size_t max_increment = */ (4 * 1024 * 1024),
+ /* enum H5C2_cache_decr_mode decr_mode = */ H5C2_decr__age_out,
+ /* double upper_hr_threshold = */ 0.9,
+ /* double decrement = */ 0.9,
+ /* hbool_t apply_max_decrement = */ TRUE,
+ /* size_t max_decrement = */ (1 * 1024 * 1024),
+ /* int epochs_before_eviction = */ 3,
+ /* hbool_t apply_empty_reserve = */ TRUE,
+ /* double empty_reserve = */ 0.1,
+ /* int dirty_bytes_threshold = */ (256 * 1024)
+ }
+};
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_fapl_mdc_api_errs()
+ *
+ * Purpose: Verify that the FAPL related MDC API calls reject input
+ * errors gracefully. Each sub-test runs only if all previous
+ * sub-tests passed (the global pass2 flag is the gate), and
+ * every negative test passes iff the API call under test
+ * returns failure (result < 0).
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/19/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_fapl_mdc_api_errs(void)
+{
+ const char * fcn_name = "check_fapl_mdc_api_errs()";
+ static char msg[128];
+ int i;
+ herr_t result;
+ hid_t fapl_id = -1;
+ H5AC2_cache_config_t default_config = H5AC2__DEFAULT_CACHE_CONFIG;
+ H5AC2_cache_config_t scratch;
+
+ TESTING("MDC/FAPL related API input errors");
+
+ pass2 = TRUE;
+
+
+ /* first test H5Pget_mdc_config().
+ *
+ * NOTE(review): scratch is an H5AC2_cache_config_t but is cast to
+ * H5AC_cache_config_t * for the public H5P API throughout this
+ * function -- presumably the two struct layouts match during the
+ * cache2 transition; confirm.
+ */
+
+ /* negative test: invalid property list id must be rejected */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( pass2 ) {
+
+ H5E_BEGIN_TRY {
+ result = H5Pget_mdc_config(-1, (H5AC_cache_config_t *)&scratch);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pget_mdc_config() accepted invalid plist_id.";
+ }
+ }
+
+ /* Create a FAPL for test purposes, and verify that it contains the
+ * default MDC configuration.
+ */
+
+ if ( pass2 ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+ }
+ }
+
+ /* positive check: a fresh FAPL must report the default config */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( ( pass2 ) &&
+ ( ( H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch) < 0) ||
+ ( !CACHE_CONFIGS_EQUAL(default_config, scratch, TRUE, TRUE) ) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "New FAPL has unexpected metadata cache config?!?!?.\n";
+ }
+
+ /* negative test: NULL output pointer must be rejected */
+ if ( pass2 ) {
+
+ H5E_BEGIN_TRY {
+ result = H5Pget_mdc_config(fapl_id, NULL);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pget_mdc_config() accepted NULL config_ptr.";
+ }
+ }
+
+ /* one last test for H5Pget_mdc_config() */
+
+ scratch.version = -1; /* a convenient, invalid value */
+ if ( pass2 ) {
+
+ H5E_BEGIN_TRY {
+ result =
+ H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pget_mdc_config() accepted bad config version.";
+ }
+ }
+
+
+ /* now test H5Pset_mdc_config()
+ */
+
+ /* negative test: invalid property list id must be rejected */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( pass2 ) {
+
+ H5E_BEGIN_TRY {
+ result =
+ H5Pset_mdc_config(-1, (H5AC_cache_config_t *)&default_config);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pset_mdc_config() accepted bad invalid plist_id.";
+ }
+ }
+
+ /* negative test: NULL config pointer must be rejected */
+ if ( pass2 ) {
+
+ H5E_BEGIN_TRY {
+ result = H5Pset_mdc_config(fapl_id, NULL);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Pset_mdc_config() accepted NULL config_ptr.";
+ }
+ }
+
+ /* sweep the table of known-invalid configurations -- every one of
+ * them must be rejected. invalid_configs[] / NUM_INVALID_CONFIGS
+ * are defined earlier in this file.
+ */
+ i = 0;
+ while ( ( pass2 ) && ( i < NUM_INVALID_CONFIGS ) )
+ {
+ H5E_BEGIN_TRY {
+ result =
+ H5Pset_mdc_config(fapl_id,
+ (H5AC_cache_config_t *)&(invalid_configs[i]));
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5Pset_mdc_config() accepted invalid_configs[%d].", i);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ /* verify that none of the above calls to H5Pset_mdc_config() changed
+ * the configuration in the FAPL.
+ */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( ( pass2 ) &&
+ ( ( H5Pget_mdc_config(fapl_id, (H5AC_cache_config_t *)&scratch) < 0 )
+ ||
+ ( !CACHE_CONFIGS_EQUAL(default_config, scratch, TRUE, TRUE) ) ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "FAPL metadata cache config changed???.\n";
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 )
+ HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+
+} /* check_fapl_mdc_api_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_file_mdc_api_errs()
+ *
+ * Purpose: Verify that the file related MDC API calls reject input
+ * errors gracefully. Exercises H5Fget_mdc_config(),
+ * H5Fset_mdc_config(), H5Fget_mdc_hit_rate(),
+ * H5Freset_mdc_hit_rate_stats(), and H5Fget_mdc_size()
+ * against a scratch file. Each sub-test runs only if all
+ * previous sub-tests passed (the global pass2 flag is the
+ * gate).
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/19/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_file_mdc_api_errs(void)
+{
+ const char * fcn_name = "check_file_mdc_api_errs()";
+ char filename[512];
+ static char msg[128];
+ hbool_t show_progress = FALSE; /* set TRUE for debug trace output */
+ int i;
+ herr_t result;
+ hid_t file_id = -1;
+ size_t max_size;
+ size_t min_clean_size;
+ size_t cur_size;
+ int cur_num_entries;
+ double hit_rate;
+ H5F_t * file_ptr = NULL;
+ H5AC2_cache_config_t default_config = H5AC2__DEFAULT_CACHE_CONFIG;
+ H5AC2_cache_config_t scratch;
+
+ TESTING("MDC/FILE related API input errors");
+
+ pass2 = TRUE;
+
+ /* Create a file for test purposes, and verify that its metadata cache
+ * is set to the default MDC configuration.
+ */
+
+ /* setup the file name */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling h5_fixname().\n", fcn_name);
+ }
+
+ if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling H5Fcreate().\n", fcn_name);
+ }
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( file_id < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fcreate() failed.\n";
+ }
+ }
+#if 0 /* JRM */
+ /* get a pointer to the files internal data structure */
+ if ( pass2 ) {
+
+ file_ptr = (H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Can't get file_ptr.\n";
+
+ }
+ }
+
+ /* since this is the test cache, must build the cache explicitly */
+ /* remove this when we start using the new cache */
+ if ( pass2 )
+ {
+ if ( H5AC2_create(file_ptr, &default_config) != SUCCEED ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Can't construct test cache.\n";
+
+ }
+ }
+#endif /* JRM */
+ validate_mdc_config(file_id, &default_config, TRUE, 1);
+
+
+ /* test H5Fget_mdc_config(). */
+
+ /* negative test: invalid file id must be rejected */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 1.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_config(-1, (H5AC_cache_config_t *)&scratch);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_config() accepted invalid file_id.";
+ }
+ }
+
+ /* negative test: NULL output pointer must be rejected */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 2.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_config(file_id, NULL);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_config() accepted NULL config_ptr.";
+ }
+ }
+
+ scratch.version = -1; /* a convenient, invalid value */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 3.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_config(file_id,
+ (H5AC_cache_config_t *)&scratch);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_config() accepted bad config version.";
+ }
+ }
+
+
+ /* test H5Fset_mdc_config() */
+
+ /* negative test: invalid file id must be rejected */
+ scratch.version = H5C2__CURR_AUTO_SIZE_CTL_VER;
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 1.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fset_mdc_config(-1,
+ (H5AC_cache_config_t *)&default_config);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fset_mdc_config() accepted bad invalid file_id.";
+ }
+ }
+
+ /* negative test: NULL config pointer must be rejected */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 2.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fset_mdc_config(file_id, NULL);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fset_mdc_config() accepted NULL config_ptr.";
+ }
+ }
+
+ /* sweep the table of known-invalid configurations -- every one of
+ * them must be rejected.
+ */
+ i = 0;
+ while ( ( pass2 ) && ( i < NUM_INVALID_CONFIGS ) )
+ {
+ if ( show_progress ) {
+
+ HDfprintf(stdout,
+ "%s: testing H5Fset_mdc_config() with invalid config %d.\n",
+ fcn_name, i);
+ }
+
+ H5E_BEGIN_TRY {
+ result =
+ H5Fset_mdc_config(file_id,
+ (H5AC_cache_config_t *)&(invalid_configs[i]));
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ HDsnprintf(msg, (size_t)128,
+ "H5Fset_mdc_config() accepted invalid_configs[%d].", i);
+ failure_mssg2 = msg;
+ }
+ i++;
+ }
+
+ /* verify that none of the above calls to H5Fset_mdc_config() changed
+ * the configuration in the FAPL.
+ */
+ validate_mdc_config(file_id, &default_config, TRUE, 2);
+
+
+ /* test H5Fget_mdc_hit_rate() */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_hit_rate() 1.\n",
+ fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_hit_rate(-1, &hit_rate);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_hit_rate() accepted bad file_id.";
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_hit_rate() 2.\n",
+ fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_hit_rate(file_id, NULL);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_hit_rate() accepted NULL hit_rate_ptr.";
+ }
+ }
+
+
+ /* test H5Freset_mdc_hit_rate_stats() */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Freset_mdc_hit_rate_stats().\n",
+ fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Freset_mdc_hit_rate_stats(-1);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 =
+ "H5Freset_mdc_hit_rate_stats() accepted bad file_id.";
+ }
+ }
+
+
+ /* test H5Fget_mdc_size() */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_size() 1.\n", fcn_name);
+ }
+
+ H5E_BEGIN_TRY {
+ result = H5Fget_mdc_size(-1, &max_size, &min_clean_size,
+ &cur_size, &cur_num_entries);
+ } H5E_END_TRY;
+
+ if ( result >= 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_size() accepted bad file_id.";
+ }
+ }
+
+ /* positive test: H5Fget_mdc_size() must tolerate NULL for any subset
+ * of its output parameters.
+ */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: testing H5Fget_mdc_size() 2.\n", fcn_name);
+ }
+
+ if ( ( H5Fget_mdc_size(file_id, &max_size, NULL, NULL, NULL) < 0 ) ||
+ ( H5Fget_mdc_size(file_id, NULL, &min_clean_size,
+ NULL, NULL) < 0 ) ||
+ ( H5Fget_mdc_size(file_id, NULL, NULL, &cur_size, NULL) < 0 ) ||
+ ( H5Fget_mdc_size(file_id, NULL, NULL, NULL,
+ &cur_num_entries) < 0 ) ||
+ ( H5Fget_mdc_size(file_id, NULL, NULL, NULL, NULL) < 0 ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fget_mdc_size() failed to handle NULL params.";
+ }
+ }
+
+
+ /* close the file and delete it */
+ if ( pass2 ) {
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: cleaning up from tests.\n", fcn_name);
+ }
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fclose() failed.\n";
+
+ } else if ( HDremove(filename) < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass2 ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass2 )
+ HDfprintf(stdout, "%s: failure_mssg2 = \"%s\".\n",
+ fcn_name, failure_mssg2);
+
+} /* check_file_mdc_api_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run tests on the cache code contained in H5C2.c
+ *
+ * Return: Success:
+ *
+ * Failure:
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(void)
+{
+ /* NOTE(review): the return value of H5open() is ignored -- if library
+ * initialization fails, the tests below will run against an
+ * uninitialized library. Consider checking it.
+ */
+ H5open();
+
+ skip_long_tests2 = FALSE;
+
+ /* run the full (long) test suite only in production (NDEBUG) builds */
+#ifdef NDEBUG
+ run_full_test2 = TRUE;
+#else /* NDEBUG */
+ run_full_test2 = FALSE;
+#endif /* NDEBUG */
+
+#if 1
+ check_fapl_mdc_api_calls();
+#endif
+#if 1
+ check_file_mdc_api_calls();
+#endif
+#if 0
+ /* this test can't be run until we start using H5C2 */
+ mdc_api_call_smoke_check();
+#endif
+#if 1
+ check_fapl_mdc_api_errs();
+#endif
+#if 1
+ check_file_mdc_api_errs();
+#endif
+
+ /* NOTE(review): main() always reports success -- test failures are
+ * reported via PASSED()/H5_FAILED() output only, not the exit code.
+ * Confirm this matches the other cache test drivers.
+ */
+ return(0);
+
+} /* main() */
diff --git a/test/cache2_common.c b/test/cache2_common.c
new file mode 100644
index 0000000..51f4f17
--- /dev/null
+++ b/test/cache2_common.c
@@ -0,0 +1,5199 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 9/13/07
+ *
+ * This file contains common code for tests of the cache
+ * implemented in H5C2.c
+ */
+
+#include "h5test.h"
+#include "H5Iprivate.h"
+#include "H5ACprivate.h"
+#include "H5MFprivate.h"
+#include "H5MMprivate.h"
+#include "cache2_common.h"
+
+/* global variable declarations: */
+
+/* Base names of the test files; expanded into full paths by h5_fixname().
+ * NOTE(review): the API-error tests in cache2_api.c reference a table
+ * named FILENAMES (e.g. FILENAMES[1]) -- confirm whether this FILENAME[]
+ * table is the one intended, or whether FILENAMES is declared elsewhere
+ * (e.g. in cache2_common.h). The names differ.
+ */
+const char *FILENAME[] = {
+ "cache_test",
+ "cache_api_test",
+ NULL
+};
+
+hid_t saved_fid = -1; /* store the file id here between cache setup
+ * and takedown.
+ */
+
+/* global test-status flags shared by all cache2 test programs */
+hbool_t write_permitted2 = TRUE;
+hbool_t pass2 = TRUE; /* set to false on error */
+hbool_t skip_long_tests2 = TRUE;
+hbool_t run_full_test2 = TRUE;
+const char *failure_mssg2 = NULL;
+
+/* backing stores for the simulated cache entries, one array per size
+ * class. The NUM_* counts and sizes are defined in cache2_common.h.
+ */
+test_entry_t pico_entries2[NUM_PICO_ENTRIES];
+test_entry_t nano_entries2[NUM_NANO_ENTRIES];
+test_entry_t micro_entries2[NUM_MICRO_ENTRIES];
+test_entry_t tiny_entries2[NUM_TINY_ENTRIES];
+test_entry_t small_entries2[NUM_SMALL_ENTRIES];
+test_entry_t medium_entries2[NUM_MEDIUM_ENTRIES];
+test_entry_t large_entries2[NUM_LARGE_ENTRIES];
+test_entry_t huge_entries2[NUM_HUGE_ENTRIES];
+test_entry_t monster_entries2[NUM_MONSTER_ENTRIES];
+test_entry_t variable_entries2[NUM_VARIABLE_ENTRIES];
+
+/* the per-type lookup tables below are all indexed by entry type, and
+ * must therefore stay in the same order as one another.
+ */
+test_entry_t * entries2[NUMBER_OF_ENTRY_TYPES] =
+{
+ pico_entries2,
+ nano_entries2,
+ micro_entries2,
+ tiny_entries2,
+ small_entries2,
+ medium_entries2,
+ large_entries2,
+ huge_entries2,
+ monster_entries2,
+ variable_entries2
+};
+
+const int32_t max_indices2[NUMBER_OF_ENTRY_TYPES] =
+{
+ NUM_PICO_ENTRIES - 1,
+ NUM_NANO_ENTRIES - 1,
+ NUM_MICRO_ENTRIES - 1,
+ NUM_TINY_ENTRIES - 1,
+ NUM_SMALL_ENTRIES - 1,
+ NUM_MEDIUM_ENTRIES - 1,
+ NUM_LARGE_ENTRIES - 1,
+ NUM_HUGE_ENTRIES - 1,
+ NUM_MONSTER_ENTRIES - 1,
+ NUM_VARIABLE_ENTRIES - 1
+};
+
+const size_t entry_sizes2[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_ENTRY_SIZE,
+ NANO_ENTRY_SIZE,
+ MICRO_ENTRY_SIZE,
+ TINY_ENTRY_SIZE,
+ SMALL_ENTRY_SIZE,
+ MEDIUM_ENTRY_SIZE,
+ LARGE_ENTRY_SIZE,
+ HUGE_ENTRY_SIZE,
+ MONSTER_ENTRY_SIZE,
+ VARIABLE_ENTRY_SIZE
+};
+
+/* primary and alternate base file addresses for each entry type --
+ * addr_to_type_and_index2() relies on these tables being sorted in
+ * increasing address order.
+ */
+const haddr_t base_addrs2[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_BASE_ADDR,
+ NANO_BASE_ADDR,
+ MICRO_BASE_ADDR,
+ TINY_BASE_ADDR,
+ SMALL_BASE_ADDR,
+ MEDIUM_BASE_ADDR,
+ LARGE_BASE_ADDR,
+ HUGE_BASE_ADDR,
+ MONSTER_BASE_ADDR,
+ VARIABLE_BASE_ADDR
+};
+
+const haddr_t alt_base_addrs2[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_ALT_BASE_ADDR,
+ NANO_ALT_BASE_ADDR,
+ MICRO_ALT_BASE_ADDR,
+ TINY_ALT_BASE_ADDR,
+ SMALL_ALT_BASE_ADDR,
+ MEDIUM_ALT_BASE_ADDR,
+ LARGE_ALT_BASE_ADDR,
+ HUGE_ALT_BASE_ADDR,
+ MONSTER_ALT_BASE_ADDR,
+ VARIABLE_ALT_BASE_ADDR
+};
+
+/* human-readable labels for progress/failure output */
+const char * entry_type_names2[NUMBER_OF_ENTRY_TYPES] =
+{
+ "pico entries -- 1 B",
+ "nano entries -- 4 B",
+ "micro entries -- 16 B",
+ "tiny entries -- 64 B",
+ "small entries -- 256 B",
+ "medium entries -- 1 KB",
+ "large entries -- 4 KB",
+ "huge entries -- 16 KB",
+ "monster entries -- 64 KB",
+ "variable entries -- 1B - 10KB"
+};
+
+
+/* callback table declaration */
+
+/* one H5C2_class_t per entry type, in entry-type index order -- the
+ * per-type callbacks each just verify the entry's type and forward to
+ * the shared implementations (deserialize(), clear_dirty_bits(), etc.)
+ * defined below.
+ */
+const H5C2_class_t types2[NUMBER_OF_ENTRY_TYPES] =
+{
+ {
+ PICO_ENTRY_TYPE,
+ "pico_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)pico_deserialize,
+ (H5C2_image_len_func_t)pico_image_len,
+ (H5C2_serialize_func_t)pico_serialize,
+ (H5C2_free_icr_func_t)pico_free_icr,
+ (H5C2_clear_dirty_bits_func_t)pico_clear_dirty_bits
+ },
+ {
+ NANO_ENTRY_TYPE,
+ "nano_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)nano_deserialize,
+ (H5C2_image_len_func_t)nano_image_len,
+ (H5C2_serialize_func_t)nano_serialize,
+ (H5C2_free_icr_func_t)nano_free_icr,
+ (H5C2_clear_dirty_bits_func_t)nano_clear_dirty_bits
+ },
+ {
+ MICRO_ENTRY_TYPE,
+ "micro_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)micro_deserialize,
+ (H5C2_image_len_func_t)micro_image_len,
+ (H5C2_serialize_func_t)micro_serialize,
+ (H5C2_free_icr_func_t)micro_free_icr,
+ (H5C2_clear_dirty_bits_func_t)micro_clear_dirty_bits
+ },
+ {
+ TINY_ENTRY_TYPE,
+ "tiny_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)tiny_deserialize,
+ (H5C2_image_len_func_t)tiny_image_len,
+ (H5C2_serialize_func_t)tiny_serialize,
+ (H5C2_free_icr_func_t)tiny_free_icr,
+ (H5C2_clear_dirty_bits_func_t)tiny_clear_dirty_bits
+ },
+ {
+ SMALL_ENTRY_TYPE,
+ "small_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)small_deserialize,
+ (H5C2_image_len_func_t)small_image_len,
+ (H5C2_serialize_func_t)small_serialize,
+ (H5C2_free_icr_func_t)small_free_icr,
+ (H5C2_clear_dirty_bits_func_t)small_clear_dirty_bits
+ },
+ {
+ MEDIUM_ENTRY_TYPE,
+ "medium_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)medium_deserialize,
+ (H5C2_image_len_func_t)medium_image_len,
+ (H5C2_serialize_func_t)medium_serialize,
+ (H5C2_free_icr_func_t)medium_free_icr,
+ (H5C2_clear_dirty_bits_func_t)medium_clear_dirty_bits
+ },
+ {
+ LARGE_ENTRY_TYPE,
+ "large_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)large_deserialize,
+ (H5C2_image_len_func_t)large_image_len,
+ (H5C2_serialize_func_t)large_serialize,
+ (H5C2_free_icr_func_t)large_free_icr,
+ (H5C2_clear_dirty_bits_func_t)large_clear_dirty_bits
+ },
+ {
+ HUGE_ENTRY_TYPE,
+ "huge_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)huge_deserialize,
+ (H5C2_image_len_func_t)huge_image_len,
+ (H5C2_serialize_func_t)huge_serialize,
+ (H5C2_free_icr_func_t)huge_free_icr,
+ (H5C2_clear_dirty_bits_func_t)huge_clear_dirty_bits
+ },
+ {
+ MONSTER_ENTRY_TYPE,
+ "monster_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)monster_deserialize,
+ (H5C2_image_len_func_t)monster_image_len,
+ (H5C2_serialize_func_t)monster_serialize,
+ (H5C2_free_icr_func_t)monster_free_icr,
+ (H5C2_clear_dirty_bits_func_t)monster_clear_dirty_bits
+ },
+ {
+ VARIABLE_ENTRY_TYPE,
+ "variable_entry",
+ H5FD_MEM_DEFAULT,
+ (H5C2_deserialize_func_t)variable_deserialize,
+ (H5C2_image_len_func_t)variable_image_len,
+ (H5C2_serialize_func_t)variable_serialize,
+ (H5C2_free_icr_func_t)variable_free_icr,
+ (H5C2_clear_dirty_bits_func_t)variable_clear_dirty_bits
+ }
+};
+
+/* forward declarations of the shared (type-independent) callback
+ * implementations -- the per-type callbacks in types2[] above are thin
+ * wrappers that check the entry type and then call these.
+ */
+static herr_t clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing);
+
+static void * deserialize(haddr_t addr,
+ size_t len,
+ const void * image_ptr,
+ const void * udata_ptr,
+ hbool_t * dirty_ptr);
+
+static herr_t image_len(void *thing,
+ size_t *image_len_ptr);
+
+static herr_t serialize(haddr_t addr,
+ size_t len,
+ void * image_ptr,
+ void * thing,
+ unsigned * flags_ptr,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+
+static herr_t free_icr(haddr_t addr,
+ size_t len,
+ void * thing);
+
+
+/* address translation functions: */
+
+/*-------------------------------------------------------------------------
+ * Function: addr_to_type_and_index2
+ *
+ * Purpose: Given an address, compute the type and index of the
+ * associated entry. Addresses at or above PICO_ALT_BASE_ADDR
+ * are resolved against the alternate base address table;
+ * all others against the main table. Asserts that the
+ * address actually belongs to the entry it maps to.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+addr_to_type_and_index2(haddr_t addr,
+ int32_t * type_ptr,
+ int32_t * index_ptr)
+{
+ int i;
+ int32_t type;
+ int32_t idx;
+
+ HDassert( type_ptr );
+ HDassert( index_ptr );
+
+ /* we only have a small number of entry types, so just do a
+ * linear search. If NUMBER_OF_ENTRY_TYPES grows, we may want
+ * to do a binary search instead.
+ */
+ i = 1;
+ if ( addr >= PICO_ALT_BASE_ADDR ) {
+
+ while ( ( i < NUMBER_OF_ENTRY_TYPES ) &&
+ ( addr >= alt_base_addrs2[i] ) )
+ {
+ i++;
+ }
+
+ } else {
+
+ while ( ( i < NUMBER_OF_ENTRY_TYPES ) &&
+ ( addr >= base_addrs2[i] ) )
+ {
+ i++;
+ }
+ }
+
+ /* i is now the index of the first type whose base address exceeds
+ * addr, so the entry belongs to the preceding type.
+ */
+ type = i - 1;
+
+ HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+
+ /* entries of a type are laid out contiguously at fixed stride, so
+ * the index is just the offset from the base divided by entry size.
+ */
+ if ( addr >= PICO_ALT_BASE_ADDR ) {
+
+ idx = (int32_t)((addr - alt_base_addrs2[type]) / entry_sizes2[type]);
+ HDassert( ( idx >= 0 ) && ( idx <= max_indices2[type] ) );
+ HDassert( !((entries2[type])[idx].at_main_addr) );
+ HDassert( addr == (entries2[type])[idx].alt_addr );
+
+ } else {
+
+ idx = (int32_t)((addr - base_addrs2[type]) / entry_sizes2[type]);
+ HDassert( ( idx >= 0 ) && ( idx <= max_indices2[type] ) );
+ HDassert( (entries2[type])[idx].at_main_addr );
+ HDassert( addr == (entries2[type])[idx].main_addr );
+ }
+
+ HDassert( addr == (entries2[type])[idx].addr );
+
+ *type_ptr = type;
+ *index_ptr = idx;
+
+ return;
+
+} /* addr_to_type_and_index2() */
+
+
+#if 0 /* This function has never been used, but we may want it
+ * some time. Lets keep it for now.
+ */
+/*-------------------------------------------------------------------------
+ * Function: type_and_index_to_addr2
+ *
+ * Purpose: Given a type and index of an entry, compute the associated
+ * addr and return that value. This is the inverse of
+ * addr_to_type_and_index2(), restricted to the main base
+ * address table.
+ *
+ * Return: computed addr
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+haddr_t
+type_and_index_to_addr2(int32_t type,
+ int32_t idx)
+{
+ haddr_t addr;
+
+ HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( idx >= 0 ) && ( idx <= max_indices2[type] ) );
+
+ /* NOTE(review): this always computes the main address -- it does not
+ * account for entries currently at their alternate address (the
+ * assert below will fire for them). Confirm before resurrecting.
+ */
+ addr = base_addrs2[type] + (((haddr_t)idx) * entry_sizes2[type]);
+
+ HDassert( addr == (entries2[type])[idx].addr );
+
+ if ( (entries2[type])[idx].at_main_addr ) {
+
+ HDassert( addr == (entries2[type])[idx].main_addr );
+
+ } else {
+
+ HDassert( addr == (entries2[type])[idx].alt_addr );
+ }
+
+ return(addr);
+
+} /* type_and_index_to_addr2() */
+
+#endif
+
+
+/* Call back functions: */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: check_write_permitted2
+ *
+ * Purpose: Determine if a write is permitted under the current
+ * circumstances, and set *write_permitted_ptr accordingly.
+ * As a general rule it is, but when we are running in parallel
+ * mode with collective I/O, we must ensure that a read cannot
+ * cause a write.
+ *
+ * In this test harness the answer is simply the value of the
+ * global write_permitted2 flag, which the tests toggle.
+ *
+ * In the event of failure, the value of *write_permitted_ptr
+ * is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/15/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+check_write_permitted2(const H5F_t UNUSED * f,
+ hid_t UNUSED dxpl_id,
+ hbool_t * write_permitted_ptr)
+{
+
+ HDassert( write_permitted_ptr );
+ *write_permitted_ptr = write_permitted2;
+
+ return(SUCCEED);
+
+} /* check_write_permitted2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: clear_dirty_bits & friends
+ *
+ * Purpose: Clear the dirty bits. The helper functions verify that the
+ * correct version of clear_dirty_bits is being called, and
+ * then call clear_dirty_bits() proper. After sanity checking
+ * the entry, the entry is marked clean and its "cleared" flag
+ * is set so tests can observe that the callback ran.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 9/20/07
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* NOTE(review): the forward declaration above declares this function
+ * static, but the definition omits the static keyword. Linkage remains
+ * internal per C rules, but the two should agree for clarity.
+ */
+herr_t
+clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ HDassert( thing );
+
+ entry_ptr = (test_entry_t *)thing;
+ base_addr = entries2[entry_ptr->type];
+
+ /* sanity check: the entry must be self-consistent and match the
+ * address / length the cache handed us.
+ */
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->size == len );
+
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices2[entry_ptr->type] );
+ HDassert( entry_ptr == &(base_addr[entry_ptr->index]) );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.addr == entry_ptr->addr );
+ HDassert( entry_ptr->header.size == entry_ptr->size );
+ HDassert( ( entry_ptr->type == VARIABLE_ENTRY_TYPE ) ||
+ ( entry_ptr->size == entry_sizes2[entry_ptr->type] ) );
+
+ entry_ptr->is_dirty = FALSE;
+
+ /* record that the clear callback was invoked for this entry */
+ entry_ptr->cleared = TRUE;
+
+ return(SUCCEED);
+
+} /* clear_dirty_bits() */
+
+/* per-type wrappers: each verifies that the entry really is of the
+ * expected type (i.e. that the cache invoked the right callback from
+ * types2[]), then defers to the shared clear_dirty_bits() above.
+ */
+herr_t
+pico_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+nano_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+micro_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+tiny_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+small_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+medium_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+large_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+huge_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+monster_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+herr_t
+variable_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing)
+{
+ HDassert( ((test_entry_t *)thing)->type == VARIABLE_ENTRY_TYPE );
+ return(clear_dirty_bits(addr, len, thing));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: deserialize & friends
+ *
+ * Purpose: deserialize the entry. The helper functions verify that the
+ * correct version of deserialize is being called, and then call
+ * deserialize proper. The entry is located from its address
+ * via addr_to_type_and_index2(), sanity checked, and -- if it
+ * has previously been written at its current address -- the
+ * image is checked against the expected layout:
+ *
+ * pico / variable entries: byte 0 == (idx & 0xFF)
+ * all other entries: byte 0 == (type & 0xFF)
+ * byte 1 == ((idx & 0xFF00) >> 8)
+ * byte 2 == (idx & 0xFF)
+ *
+ * Return: void * (pointer to the in core representation of the entry)
+ *
+ * Programmer: John Mainzer
+ * 9/20/07
+ *
+ * Modifications:
+ *
+ * Fixed the mismatch-diagnostic HDfprintf() in the multi-byte
+ * branch: the format string contained a single %x conversion
+ * but was passed three arguments, and the first "expected"
+ * value printed was (idx & 0xFF) where the assert below
+ * checks byte 0 against (type & 0xFF).
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void *
+deserialize(haddr_t addr,
+ size_t len,
+ const void * image_ptr,
+ const UNUSED void * udata_ptr,
+ hbool_t * dirty_ptr)
+{
+ int32_t type;
+ int32_t idx;
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ addr_to_type_and_index2(addr, &type, &idx);
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ /* sanity check: the located entry must be self-consistent and match
+ * the address / length the cache handed us.
+ */
+ HDassert( entry_ptr->type >= 0 );
+ HDassert( entry_ptr->type < NUMBER_OF_ENTRY_TYPES );
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices2[type] );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->size == len );
+ HDassert( ( entry_ptr->type == VARIABLE_ENTRY_TYPE ) ||
+ ( entry_ptr->size == entry_sizes2[type] ) );
+ HDassert( dirty_ptr != NULL );
+
+ /* for now *dirty_ptr will always be FALSE */
+ *dirty_ptr = FALSE;
+
+
+ /* verify that the image contains the expected data -- but only if
+ * the entry has actually been written at its current location.
+ */
+ HDassert( image_ptr != NULL );
+ if ( ( ( entry_ptr->at_main_addr )
+ &&
+ ( entry_ptr->written_to_main_addr )
+ )
+ ||
+ ( ( ! ( entry_ptr->at_main_addr ) )
+ &&
+ ( entry_ptr->written_to_alt_addr )
+ )
+ ) {
+
+ if ( ( type == PICO_ENTRY_TYPE ) || ( type == VARIABLE_ENTRY_TYPE ) ) {
+
+ /* dump diagnostics before the assert fires */
+ if ( (*((const char *)image_ptr)) != (char)(idx & 0xFF) ) {
+
+ HDfprintf(stdout, "type = %d, idx = %d, addr = 0x%lx.\n",
+ type, idx, (long)addr);
+ HDfprintf(stdout, "*image_ptr = 0x%x\n",
+ (int)(*((const char *)image_ptr)));
+ HDfprintf(stdout, "expected *image_ptr = 0x%x\n",
+ (int)(idx & 0xFF));
+ }
+ HDassert( (*((const char *)image_ptr)) == (char)(idx & 0xFF) );
+
+ } else {
+
+ /* dump diagnostics before the asserts fire */
+ if ( (*(((const char *)image_ptr) + 2)) != (char)(idx & 0xFF) ) {
+
+ HDfprintf(stdout, "type = %d, idx = %d, addr = 0x%lx.\n",
+ type, idx, (long)addr);
+ HDfprintf(stdout, "*image_ptr = 0x%x 0x%x 0x%x\n",
+ (int)(*((const char *)image_ptr)),
+ (int)(*(((const char *)image_ptr) + 1)),
+ (int)(*(((const char *)image_ptr) + 2)));
+ HDfprintf(stdout, "expected *image_ptr = 0x%x 0x%x 0x%x\n",
+ (int)(type & 0xFF),
+ (int)((idx & 0xFF00)>>8),
+ (int)(idx & 0xFF));
+ }
+ HDassert( (*((const char *)image_ptr)) == (char)(type & 0xFF) );
+ HDassert( (*(((const char *)image_ptr) + 1)) ==
+ (char)((idx & 0xFF00)>>8) );
+ HDassert( (*(((const char *)image_ptr) + 2)) ==
+ (char)(idx & 0xFF) );
+
+ }
+ }
+
+ /* record that the deserialize callback ran, and mark the freshly
+ * loaded entry clean.
+ */
+ entry_ptr->deserialized = TRUE;
+
+ entry_ptr->header.is_dirty = FALSE;
+ entry_ptr->is_dirty = FALSE;
+
+ (entry_ptr->deserializes)++;
+
+ return((void *)entry_ptr);
+
+} /* deserialize() */
+
+/* Type specific deserialize callbacks.  Each forwards directly to the
+ * common deserialize() routine above, which performs all sanity
+ * checking.
+ */
+void *
+pico_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                 const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+nano_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                 const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+micro_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                  const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+tiny_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                 const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+small_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                  const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+medium_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                   const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+large_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                  const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+huge_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                 const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+monster_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                    const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+void *
+variable_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+                     const UNUSED void * udata_ptr, hbool_t * dirty_ptr)
+{
+    return deserialize(addr, len, image_ptr, udata_ptr, dirty_ptr);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: image_len & friends
+ *
+ * Purpose: Return the real (and possibly reduced) length of the image.
+ * The helper functions verify that the correct version of
+ * image_len is being called, and then call image_len
+ * proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 9/19/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Common image_len callback.  Sanity checks the supplied entry, and
+ * then reports its current (possibly reduced) on disk image length
+ * via *image_len_ptr.
+ */
+herr_t
+image_len(void *thing,
+          size_t *image_len_ptr)
+{
+    test_entry_t * entry;
+    int32_t entry_type;
+    int32_t entry_idx;
+
+    HDassert( thing );
+    HDassert( image_len_ptr );
+
+    entry = (test_entry_t *)thing;
+
+    HDassert( entry->self == entry );
+
+    entry_type = entry->type;
+    entry_idx  = entry->index;
+
+    HDassert( ( entry_type >= 0 ) && ( entry_type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( entry_idx >= 0 ) && ( entry_idx <= max_indices2[entry_type] ) );
+    HDassert( entry == &((entries2[entry_type])[entry_idx]) );
+
+    /* only variable entries may have a size other than the default
+     * for their type.
+     */
+    if ( entry_type == VARIABLE_ENTRY_TYPE ) {
+
+        HDassert( entry->size > 0 );
+        HDassert( entry->size <= entry_sizes2[entry_type] );
+
+    } else {
+
+        HDassert( entry->size == entry_sizes2[entry_type] );
+    }
+
+    *image_len_ptr = entry->size;
+
+    return(SUCCEED);
+
+} /* image_len() */
+
+
+/* Type specific image_len callbacks.  Each sanity checks the entry
+ * type, and then forwards to the common image_len() routine above.
+ */
+herr_t
+pico_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+nano_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+micro_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+tiny_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+small_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+medium_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+large_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+huge_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+monster_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+herr_t
+variable_image_len(void *thing, size_t *image_len_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == VARIABLE_ENTRY_TYPE );
+    return(image_len(thing, image_len_ptr));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: serialize & friends
+ *
+ * Purpose: Serialize the supplied entry. For now this consists of
+ * loading the type and index of the entry into the first
+ * three bytes of the image (if it is long enough -- if not
+ * just load the low order byte of the index into the first
+ * byte of the image).
+ *
+ * The helper functions verify that the correct version of
+ * serialize is being called, and then call serialize
+ * proper.
+ *
+ * Return: SUCCEED if successful, FAIL otherwise.
+ *
+ * Programmer: John Mainzer
+ * 9/19/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+serialize(haddr_t addr,
+          size_t len,
+          void * image_ptr,
+          void * thing,
+          unsigned * flags_ptr,
+          haddr_t * new_addr_ptr,
+          size_t * new_len_ptr,
+          void ** new_image_ptr_ptr)
+{
+    const char * fcn_name = "serialize()";
+    hbool_t verbose = FALSE;
+    herr_t ret_val = SUCCEED;
+    int32_t i;
+    int32_t type;
+    int32_t idx;
+    test_entry_t * entry_ptr;
+    test_entry_t * base_addr;
+
+    if ( verbose ) {
+
+        HDfprintf(stdout, "%s: addr = 0x%lx, len = %ld.\n", fcn_name,
+                  (long)addr, (long)len);
+    }
+
+    HDassert( image_ptr );
+    HDassert( thing );
+    HDassert( flags_ptr );
+
+    *flags_ptr = 0;
+
+    HDassert( new_addr_ptr );
+    HDassert( new_len_ptr );
+    HDassert( new_image_ptr_ptr );
+
+    entry_ptr = (test_entry_t *)thing;
+
+    HDassert( entry_ptr->self == entry_ptr );
+    HDassert( entry_ptr->addr == addr );
+    HDassert( entry_ptr->size == len );
+
+    /* shouldn't serialize the entry unless it is dirty */
+    HDassert( entry_ptr->is_dirty );
+
+    type = entry_ptr->type;
+    idx = entry_ptr->index;
+
+    HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( idx >= 0 ) && ( idx <= max_indices2[type] ) );
+
+    base_addr = entries2[type];
+
+    HDassert( entry_ptr == &(base_addr[idx]) );
+
+    HDassert( entry_ptr->num_flush_ops >= 0 );
+    HDassert( entry_ptr->num_flush_ops < MAX_FLUSH_OPS );
+
+    if ( entry_ptr->num_flush_ops > 0 ) {
+
+        /* execute any flush operations attached to the entry.  Note
+         * that these may resize and/or rename the entry -- if so, the
+         * image buffer must be re-allocated, and the new length /
+         * image / address must be reported back to the cache.
+         */
+        for ( i = 0; i < entry_ptr->num_flush_ops; i++ )
+        {
+            execute_flush_op2(entry_ptr->cache_ptr,
+                              entry_ptr,
+                              &((entry_ptr->flush_ops)[i]),
+                              flags_ptr);
+        }
+        entry_ptr->num_flush_ops = 0;
+        entry_ptr->flush_op_self_resize_in_progress = FALSE;
+
+        if ( ( pass2 ) &&
+             ( ((*flags_ptr) & H5C2__SERIALIZE_RESIZED_FLAG) != 0 ) ) {
+
+            /* re-allocate *image_ptr, and place the new pointer in
+             * *new_image_ptr_ptr.
+             */
+            image_ptr = H5MM_xfree(image_ptr);
+
+            if ( image_ptr != NULL ) {
+
+                ret_val = FAIL;
+                pass2 = FALSE;
+                failure_mssg2 = "couldn't free image_ptr.";
+            }
+
+            if ( pass2 ) {
+
+                HDassert( entry_ptr->type == VARIABLE_ENTRY_TYPE );
+                HDassert( entry_ptr->size > 0 );
+                HDassert( entry_ptr->size <= VARIABLE_ENTRY_SIZE );
+
+                image_ptr = H5MM_malloc((size_t)(entry_ptr->size));
+
+                if ( image_ptr == NULL ) {
+
+                    ret_val = FAIL;
+                    pass2 = FALSE;
+                    failure_mssg2 = "couldn't allocate new image.";
+
+                } else {
+
+                    *new_image_ptr_ptr = image_ptr;
+                    *new_len_ptr = entry_ptr->size;
+
+                }
+            }
+        }
+
+        if ( ((*flags_ptr) & H5C2__SERIALIZE_RENAMED_FLAG) != 0 ) {
+
+            /* a rename must be accompanied by a resize.
+             *
+             * bug fix: this assert used "|" (bitwise or), which makes
+             * the test vacuously true -- "&" is what is needed here.
+             */
+            HDassert( ((*flags_ptr) & H5C2__SERIALIZE_RESIZED_FLAG) != 0 );
+
+            /* place the new address in *new_addr_ptr */
+
+            *new_addr_ptr = entry_ptr->addr;
+        }
+    }
+
+    /* write the expected type / index data into the image.
+     *
+     * bug fix: guard against a NULL image_ptr, as the re-allocation
+     * above may have failed -- the old code dereferenced the NULL
+     * pointer in that case.
+     */
+    if ( image_ptr != NULL ) {
+
+        if ( ( type == PICO_ENTRY_TYPE ) || ( type == VARIABLE_ENTRY_TYPE ) ) {
+
+            HDassert( entry_ptr->size >= PICO_ENTRY_SIZE );
+            *((char *)image_ptr) = (char)((entry_ptr->index) & 0xFF);
+
+        } else {
+
+            HDassert(entry_ptr->size >= NANO_ENTRY_SIZE );
+            *((char *)image_ptr) = (char)((entry_ptr->type) & 0xFF);
+            *(((char *)image_ptr) + 1) =
+                (char)(((entry_ptr->index) & 0xFF00) >> 8);
+            *(((char *)image_ptr) + 2) = (char)((entry_ptr->index) & 0xFF);
+
+        }
+    }
+
+    /* We no longer do the actual write through a callback -- this is
+     * as close to that callback as we will get.  Hence mark the entry
+     * clean here.  If all goes well, it will be flushed shortly.
+     */
+
+    entry_ptr->is_dirty = FALSE;
+
+    /* since the entry is about to be written to disk, we can mark it
+     * as initialized.
+     */
+    if ( entry_ptr->at_main_addr ) {
+        entry_ptr->written_to_main_addr = TRUE;
+    } else {
+        entry_ptr->written_to_alt_addr = TRUE;
+    }
+
+    /* do book keeping */
+    (entry_ptr->serializes)++;
+    entry_ptr->serialized = TRUE;
+
+    /* bug fix: the old code returned SUCCEED unconditionally, which
+     * discarded ret_val and thus masked re-allocation failures.
+     */
+    return(ret_val);
+
+} /* serialize() */
+
+/* Type specific serialize callbacks.  Each sanity checks the entry
+ * type, and then forwards to the common serialize() routine above.
+ */
+herr_t
+pico_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+               unsigned * flags_ptr, haddr_t * new_addr_ptr,
+               size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+nano_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+               unsigned * flags_ptr, haddr_t * new_addr_ptr,
+               size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+micro_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+tiny_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+               unsigned * flags_ptr, haddr_t * new_addr_ptr,
+               size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+small_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+medium_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                 unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                 size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+large_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+huge_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+               unsigned * flags_ptr, haddr_t * new_addr_ptr,
+               size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+monster_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                  unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                  size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+herr_t
+variable_serialize(haddr_t addr, size_t len, void * image_ptr, void * thing,
+                   unsigned * flags_ptr, haddr_t * new_addr_ptr,
+                   size_t * new_len_ptr, void ** new_image_ptr_ptr)
+{
+    HDassert( ((test_entry_t *)thing)->type == VARIABLE_ENTRY_TYPE );
+    return(serialize(addr, len, image_ptr, thing, flags_ptr,
+                     new_addr_ptr, new_len_ptr, new_image_ptr_ptr));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: free_icr & friends
+ *
+ * Purpose: Nominally, this callback is supposed to free the
+ * in core representation of the entry.
+ *
+ * In the context of this test bed, we use it to do
+ * do all the processing we used to do on a destroy.
+ * In particular, we use it to release all the pins
+ * that this entry may have on other entries.
+ *
+ * The helper functions verify that the correct version of
+ * free_icr is being called, and then call free_icr
+ * proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 9/19/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Common free_icr callback.  In this test bed, in addition to
+ * discarding the in core representation, we use this callback to do
+ * the processing formerly done on a destroy -- in particular,
+ * releasing all pins this entry holds on other entries.
+ */
+herr_t
+free_icr(haddr_t addr,
+         size_t len,
+         void * thing)
+{
+    int j;
+    test_entry_t * entry;
+
+    HDassert( thing );
+
+    entry = (test_entry_t *)thing;
+
+    /* sanity check the target entry against the supplied parameters */
+    HDassert( entry->addr == addr );
+    HDassert( entry->size == len );
+    HDassert( entry->index >= 0 );
+    HDassert( entry->index <= max_indices2[entry->type] );
+    HDassert( entry == &((entries2[entry->type])[entry->index]) );
+    HDassert( entry == entry->self );
+    HDassert( entry->cache_ptr != NULL );
+    HDassert( entry->cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( ( entry->header.destroy_in_progress ) ||
+              ( entry->header.addr == entry->addr ) );
+    HDassert( entry->header.size == entry->size );
+    HDassert( ( entry->type == VARIABLE_ENTRY_TYPE ) ||
+              ( entry->size == entry_sizes2[entry->type] ) );
+
+    /* the entry must be clean when its icr is freed */
+    HDassert( !(entry->is_dirty) );
+    HDassert( !(entry->header.is_dirty) );
+
+    /* drop all pins held by this entry, unpinning any entry whose
+     * pinning reference count drops to zero as a result.
+     */
+    for ( j = 0; j < entry->num_pins; j++ )
+    {
+        test_entry_t * pinned;
+
+        pinned = &((entries2[entry->pin_type[j]])[entry->pin_idx[j]]);
+
+        HDassert( 0 <= pinned->type );
+        HDassert( pinned->type < NUMBER_OF_ENTRY_TYPES );
+        HDassert( pinned->type == entry->pin_type[j] );
+        HDassert( pinned->index >= 0 );
+        HDassert( pinned->index <= max_indices2[pinned->type] );
+        HDassert( pinned->index == entry->pin_idx[j] );
+        HDassert( pinned == pinned->self );
+        HDassert( pinned->header.is_pinned );
+        HDassert( pinned->is_pinned );
+        HDassert( pinned->pinning_ref_count > 0 );
+
+        pinned->pinning_ref_count--;
+
+        if ( pinned->pinning_ref_count <= 0 ) {
+
+            unpin_entry2(pinned->cache_ptr, pinned->type, pinned->index);
+        }
+
+        entry->pin_type[j] = -1;
+        entry->pin_idx[j] = -1;
+    }
+    entry->num_pins = 0;
+
+    entry->destroyed = TRUE;
+    entry->cache_ptr = NULL;
+
+    return(SUCCEED);
+
+} /* free_icr() */
+
+/* Type specific free_icr callbacks.  Each sanity checks the entry
+ * type, and then forwards to the common free_icr() routine above.
+ */
+herr_t
+pico_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+nano_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+micro_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+tiny_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+small_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+medium_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+large_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+huge_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+monster_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+herr_t
+variable_free_icr(haddr_t addr, size_t len, void * thing)
+{
+    HDassert( ((test_entry_t *)thing)->type == VARIABLE_ENTRY_TYPE );
+    return(free_icr(addr, len, thing));
+}
+
+
+/**************************************************************************/
+/**************************************************************************/
+/************************** test utility functions: ***********************/
+/**************************************************************************/
+/**************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: add_flush_op2
+ *
+ * Purpose: Do nothing if pass2 is FALSE on entry.
+ *
+ * Otherwise, add the specified flush operation to the
+ * target instance of test_entry_t.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/1/06
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+add_flush_op2(int target_type,
+              int target_idx,
+              int op_code,
+              int type,
+              int idx,
+              hbool_t flag,
+              size_t new_size)
+{
+    test_entry_t * tgt_ptr;
+    struct flush_op * op_ptr;
+
+    /* validate the arguments whether or not we do anything */
+    HDassert( ( 0 <= target_type ) && ( target_type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= target_idx ) &&
+              ( target_idx <= max_indices2[target_type] ) );
+    HDassert( ( 0 <= op_code ) && ( op_code <= FLUSH_OP__MAX_OP ) );
+    HDassert( ( op_code != FLUSH_OP__RESIZE ) ||
+              ( type == VARIABLE_ENTRY_TYPE ) );
+    HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+    HDassert( ( flag == TRUE ) || ( flag == FALSE ) );
+    HDassert( new_size <= VARIABLE_ENTRY_SIZE );
+
+    if ( ! pass2 ) {
+
+        return;
+    }
+
+    tgt_ptr = &((entries2[target_type])[target_idx]);
+
+    HDassert( tgt_ptr->index == target_idx );
+    HDassert( tgt_ptr->type == target_type );
+    HDassert( tgt_ptr == tgt_ptr->self );
+    HDassert( tgt_ptr->num_flush_ops < MAX_FLUSH_OPS );
+
+    /* append the new flush operation to the target entry's list */
+    op_ptr = &((tgt_ptr->flush_ops)[(tgt_ptr->num_flush_ops)++]);
+
+    op_ptr->op_code = op_code;
+    op_ptr->type    = type;
+    op_ptr->idx     = idx;
+    op_ptr->flag    = flag;
+    op_ptr->size    = new_size;
+
+    return;
+
+} /* add_flush_op2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_pinned_entry_dependency2
+ *
+ * Purpose: Do nothing if pass2 is FALSE on entry.
+ *
+ * Otherwise, set up a pinned entry dependency so we can
+ * test the pinned entry modifications to the flush routine.
+ *
+ * Given the types and indices of the pinned and pinning
+ * entries, add the pinned entry to the list of pinned
+ * entries in the pinning entry, increment the
+ * pinning reference count of the pinned entry, and
+ * if that count was zero initially, pin the entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+create_pinned_entry_dependency2(H5C2_t * cache_ptr,
+                                int pinning_type,
+                                int pinning_idx,
+                                int pinned_type,
+                                int pinned_idx)
+{
+    test_entry_t * pinning_base_addr;
+    test_entry_t * pinning_entry_ptr;
+    test_entry_t * pinned_base_addr;
+    test_entry_t * pinned_entry_ptr;
+
+    if ( pass2 ) {
+
+        HDassert( ( 0 <= pinning_type ) &&
+                  ( pinning_type < NUMBER_OF_ENTRY_TYPES ) );
+        HDassert( ( 0 <= pinning_idx ) &&
+                  ( pinning_idx <= max_indices2[pinning_type] ) );
+        HDassert( ( 0 <= pinned_type ) &&
+                  ( pinned_type < NUMBER_OF_ENTRY_TYPES ) );
+        HDassert( ( 0 <= pinned_idx ) &&
+                  ( pinned_idx <= max_indices2[pinned_type] ) );
+
+        pinning_base_addr = entries2[pinning_type];
+        pinning_entry_ptr = &(pinning_base_addr[pinning_idx]);
+
+        pinned_base_addr = entries2[pinned_type];
+        pinned_entry_ptr = &(pinned_base_addr[pinned_idx]);
+
+        /* sanity check the pinning entry */
+        HDassert( pinning_entry_ptr->index == pinning_idx );
+        HDassert( pinning_entry_ptr->type == pinning_type );
+        HDassert( pinning_entry_ptr == pinning_entry_ptr->self );
+        HDassert( pinning_entry_ptr->num_pins < MAX_PINS );
+        HDassert( ! ( pinning_entry_ptr->is_protected ) );
+
+        /* sanity check the pinned entry.
+         *
+         * bug fix: this block used to re-check the pinning entry --
+         * an apparent copy / paste error.
+         */
+        HDassert( pinned_entry_ptr->index == pinned_idx );
+        HDassert( pinned_entry_ptr->type == pinned_type );
+        HDassert( pinned_entry_ptr == pinned_entry_ptr->self );
+
+        /* record the pin in the pinning entry */
+        pinning_entry_ptr->pin_type[pinning_entry_ptr->num_pins] = pinned_type;
+        pinning_entry_ptr->pin_idx[pinning_entry_ptr->num_pins] = pinned_idx;
+        (pinning_entry_ptr->num_pins)++;
+
+        /* if this is the first pin on the pinned entry, pin it in the
+         * cache via a protect / unprotect pair.
+         */
+        if ( pinned_entry_ptr->pinning_ref_count == 0 ) {
+
+            protect_entry2(cache_ptr, pinned_type, pinned_idx);
+            unprotect_entry2(cache_ptr, pinned_type, pinned_idx, FALSE,
+                             H5C2__PIN_ENTRY_FLAG);
+        }
+
+        (pinned_entry_ptr->pinning_ref_count)++;
+    }
+
+    return;
+
+} /* create_pinned_entry_dependency2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: dirty_entry2
+ *
+ * Purpose: Given a pointer to a cache, an entry type, and an index,
+ * dirty the target entry.
+ *
+ * If the dirty_pin parameter is true, verify that the
+ * target entry is in the cache and is pinned. If it
+ * isn't, scream and die. If it is, use the
+ * H5C2_mark_pinned_entry_dirty() call to dirty it.
+ *
+ * Do nothing if pass2 is false on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+dirty_entry2(H5C2_t * cache_ptr,
+             int32_t type,
+             int32_t idx,
+             hbool_t dirty_pin)
+{
+    test_entry_t * entry;
+
+    HDassert( cache_ptr );
+    HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+    if ( ! pass2 ) {
+
+        return;
+    }
+
+    if ( ! dirty_pin ) {
+
+        /* dirty the entry via a protect / dirtied unprotect pair */
+        protect_entry2(cache_ptr, type, idx);
+        unprotect_entry2(cache_ptr, type, idx, TRUE, H5C2__NO_FLAGS_SET);
+
+        return;
+    }
+
+    /* dirty-pin case: the entry must be present and pinned */
+    if ( ! entry_in_cache2(cache_ptr, type, idx) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "entry to be dirty pinned is not in cache.";
+
+        return;
+    }
+
+    entry = &((entries2[type])[idx]);
+
+    HDassert( entry->index == idx );
+    HDassert( entry->type == type );
+    HDassert( entry == entry->self );
+
+    if ( ! ( (entry->header).is_pinned ) ) {
+
+        pass2 = FALSE;
+        failure_mssg2 = "entry to be dirty pinned is not pinned.";
+
+    } else {
+
+        mark_pinned_entry_dirty2(cache_ptr, type, idx, FALSE, (size_t)0);
+    }
+
+    return;
+
+} /* dirty_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: execute_flush_op2
+ *
+ * Purpose: Given a pointer to an instance of struct flush_op, execute
+ * it.
+ *
+ * Do nothing if pass2 is false on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/1/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+execute_flush_op2(H5C2_t * cache_ptr,
+                  struct test_entry_t * entry_ptr,
+                  struct flush_op * op_ptr,
+                  unsigned * flags_ptr)
+{
+    /* sanity check the parameters */
+    HDassert( cache_ptr != NULL );
+    HDassert( cache_ptr->magic == H5C2__H5C2_T_MAGIC );
+    HDassert( entry_ptr != NULL );
+    /* bug fix: this assert used "=" (assignment) rather than "==",
+     * making it vacuously true and silently overwriting entry_ptr.
+     */
+    HDassert( entry_ptr == entry_ptr->self );
+    HDassert( entry_ptr->header.addr == entry_ptr->addr );
+    HDassert( ( entry_ptr->flush_op_self_resize_in_progress ) ||
+              ( entry_ptr->header.size == entry_ptr->size ) );
+    HDassert( op_ptr != NULL );
+    HDassert( ( 0 <= entry_ptr->type ) &&
+              ( entry_ptr->type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= entry_ptr->index ) &&
+              ( entry_ptr->index <= max_indices2[entry_ptr->type] ) );
+    HDassert( ( 0 <= op_ptr->type ) &&
+              ( op_ptr->type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= op_ptr->idx ) &&
+              ( op_ptr->idx <= max_indices2[op_ptr->type] ) );
+    HDassert( ( op_ptr->flag == FALSE ) || ( op_ptr->flag == TRUE ) );
+    HDassert( flags_ptr != NULL );
+
+    if ( pass2 ) {
+
+        switch ( op_ptr->op_code )
+        {
+            case FLUSH_OP__NO_OP:
+                break;
+
+            case FLUSH_OP__DIRTY:
+                HDassert( ( entry_ptr->type != op_ptr->type ) ||
+                          ( entry_ptr->index != op_ptr->idx ) );
+
+                dirty_entry2(cache_ptr, op_ptr->type, op_ptr->idx,
+                             op_ptr->flag);
+                break;
+
+            case FLUSH_OP__RESIZE:
+                if ( ( entry_ptr->type == op_ptr->type ) &&
+                     ( entry_ptr->index == op_ptr->idx ) ) {
+
+                    /* the flush operation is acting on the entry to
+                     * which it is attached.  Handle this here:
+                     */
+                    HDassert( entry_ptr->type == VARIABLE_ENTRY_TYPE );
+                    HDassert( op_ptr->size > 0 );
+                    HDassert( op_ptr->size <= VARIABLE_ENTRY_SIZE );
+
+                    entry_ptr->size = op_ptr->size;
+
+                    (*flags_ptr) |= H5C2__SERIALIZE_RESIZED_FLAG;
+
+                    entry_ptr->flush_op_self_resize_in_progress = TRUE;
+
+                } else {
+
+                    /* change the size of some other entry */
+
+                    resize_entry2(cache_ptr, op_ptr->type, op_ptr->idx,
+                                  op_ptr->size, op_ptr->flag);
+                }
+                break;
+
+            case FLUSH_OP__RENAME:
+                if ( ( entry_ptr->type == op_ptr->type ) &&
+                     ( entry_ptr->index == op_ptr->idx ) ) {
+
+                    /* the flush operation is acting on the entry to
+                     * which it is attached.  Handle this here:
+                     */
+
+                    HDassert( ((*flags_ptr) & H5C2__SERIALIZE_RESIZED_FLAG)
+                              != 0 );
+
+                    (*flags_ptr) |= H5C2__SERIALIZE_RENAMED_FLAG;
+
+                    if ( op_ptr->flag ) {
+
+                        /* rename from the alternate to the main address */
+                        HDassert( entry_ptr->addr == entry_ptr->alt_addr );
+                        entry_ptr->addr = entry_ptr->main_addr;
+                        entry_ptr->at_main_addr = TRUE;
+
+                    } else {
+
+                        /* rename from the main to the alternate address */
+                        HDassert( entry_ptr->addr == entry_ptr->main_addr );
+                        entry_ptr->addr = entry_ptr->alt_addr;
+                        entry_ptr->at_main_addr = FALSE;
+
+                    }
+
+                } else {
+
+                    rename_entry2(cache_ptr, op_ptr->type, op_ptr->idx,
+                                  op_ptr->flag);
+                }
+                break;
+
+            default:
+                pass2 = FALSE;
+                failure_mssg2 = "Undefined flush op code.";
+                break;
+        }
+    }
+
+    return;
+
+} /* execute_flush_op2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: entry_in_cache2
+ *
+ * Purpose: Given a pointer to a cache, an entry type, and an index,
+ * determine if the entry is currently in the cache.
+ *
+ * Return: TRUE if the entry is in the cache, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ * JRM - 10/12/04
+ * Removed references to local_H5C2_t, as we now get direct
+ * access to the definition of H5C2_t via H5Cpkg.h.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+hbool_t
+entry_in_cache2(H5C2_t * cache_ptr,
+                int32_t type,
+                int32_t idx)
+{
+    test_entry_t * entry;
+    H5C2_cache_entry_t * hash_hit = NULL;
+
+    HDassert( cache_ptr );
+    HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+    entry = &((entries2[type])[idx]);
+
+    HDassert( entry->index == idx );
+    HDassert( entry->type == type );
+    HDassert( entry == entry->self );
+
+    /* look the entry's address up in the cache's hash table */
+    H5C2__SEARCH_INDEX(cache_ptr, entry->addr, hash_hit)
+
+    if ( hash_hit == NULL ) {
+
+        return(FALSE);
+    }
+
+    /* found it -- cross check identity before reporting success */
+    HDassert( hash_hit == (H5C2_cache_entry_t *)entry );
+    HDassert( entry->addr == entry->header.addr );
+
+    return(TRUE);
+
+} /* entry_in_cache2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: reset_entries2
+ *
+ * Purpose: reset the contents of the entries arrays to known values.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ * JRM -- 3/31/06
+ * Added initialization for new pinned entry test related
+ * fields.
+ *
+ * JRM -- 4/1/07
+ * Added initialization for the new is_read_only, and
+ * ro_ref_count fields.
+ *
+ * JRM -- 9/20/07
+ * Re-worked function for the cache api mods needed to
+ * support journaling.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+reset_entries2(void)
+
+{
+    int i;
+    int j;
+    int k;
+    int32_t max_index;
+    haddr_t addr = PICO_BASE_ADDR;
+    haddr_t alt_addr = PICO_ALT_BASE_ADDR;
+    size_t entry_size;
+    test_entry_t * base_addr;
+
+    /* walk all entry types, and all entries of each type, restoring
+     * every field to its initial value.  Main and alternate addresses
+     * are assigned sequentially within each type.
+     */
+    for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+    {
+        entry_size = entry_sizes2[i];
+        max_index = max_indices2[i];
+        base_addr = entries2[i];
+
+        HDassert( base_addr );
+
+        for ( j = 0; j <= max_index; j++ )
+        {
+            /* one can argue that we should fill the header with garbage.
+             * If this is desired, we can simply comment out the header
+             * initialization - the headers will be full of garbage soon
+             * enough.
+             */
+
+            base_addr[j].header.addr = (haddr_t)0;
+            base_addr[j].header.size = (size_t)0;
+            base_addr[j].header.type = NULL;
+            base_addr[j].header.is_dirty = FALSE;
+            base_addr[j].header.is_protected = FALSE;
+            base_addr[j].header.is_read_only = FALSE;
+            /* bug fix: ro_ref_count is an integer count, not a
+             * boolean -- initialize it with 0, not FALSE.
+             */
+            base_addr[j].header.ro_ref_count = 0;
+            base_addr[j].header.next = NULL;
+            base_addr[j].header.prev = NULL;
+            base_addr[j].header.aux_next = NULL;
+            base_addr[j].header.aux_prev = NULL;
+
+            base_addr[j].self = &(base_addr[j]);
+            base_addr[j].cache_ptr = NULL;
+            base_addr[j].written_to_main_addr = FALSE;
+            base_addr[j].written_to_alt_addr = FALSE;
+            base_addr[j].addr = addr;
+            base_addr[j].at_main_addr = TRUE;
+            base_addr[j].main_addr = addr;
+            base_addr[j].alt_addr = alt_addr;
+            base_addr[j].size = entry_size;
+            base_addr[j].type = i;
+            base_addr[j].index = j;
+            base_addr[j].serializes = 0;
+            base_addr[j].deserializes = 0;
+            base_addr[j].is_dirty = FALSE;
+            base_addr[j].is_protected = FALSE;
+            base_addr[j].is_read_only = FALSE;
+            /* bug fix: as above, ro_ref_count is a count -- use 0 */
+            base_addr[j].ro_ref_count = 0;
+
+            base_addr[j].is_pinned = FALSE;
+            base_addr[j].pinning_ref_count = 0;
+            base_addr[j].num_pins = 0;
+            for ( k = 0; k < MAX_PINS; k++ )
+            {
+                base_addr[j].pin_type[k] = -1;
+                base_addr[j].pin_idx[k] = -1;
+            }
+
+            base_addr[j].num_flush_ops = 0;
+            for ( k = 0; k < MAX_FLUSH_OPS; k++ )
+            {
+                base_addr[j].flush_ops[k].op_code = FLUSH_OP__NO_OP;
+                base_addr[j].flush_ops[k].type = -1;
+                base_addr[j].flush_ops[k].idx = -1;
+                base_addr[j].flush_ops[k].flag = FALSE;
+                base_addr[j].flush_ops[k].size = 0;
+            }
+            base_addr[j].flush_op_self_resize_in_progress = FALSE;
+
+            base_addr[j].deserialized = FALSE;
+            base_addr[j].cleared = FALSE;
+            base_addr[j].serialized = FALSE;
+            base_addr[j].destroyed = FALSE;
+
+            addr += (haddr_t)entry_size;
+            alt_addr += (haddr_t)entry_size;
+        }
+    }
+
+    return;
+
+} /* reset_entries2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: resize_entry2
+ *
+ * Purpose: Given a pointer to a cache, an entry type, an index, and
+ * a size, set the size of the target entry to the size. Note
+ * that at present, the type of the entry must be
+ * VARIABLE_ENTRY_TYPE.
+ *
+ * If the resize_pin parameter is true, verify that the
+ * target entry is in the cache and is pinned. If it
+ * isn't, scream and die. If it is, use the
+ * H5C2_mark_pinned_entry_dirty() call to resize it.
+ *
+ * Do nothing if pass2 is false on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+resize_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ size_t new_size,
+ hbool_t resize_pin)
+{
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( type == VARIABLE_ENTRY_TYPE );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+ HDassert( ( 0 < new_size ) && ( new_size <= entry_sizes2[type] ) );
+
+ if ( pass2 ) {
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+
+ if ( resize_pin ) {
+
+ if ( ! entry_in_cache2(cache_ptr, type, idx) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "entry to be resized pinned is not in cache.";
+
+ } else {
+
+ if ( ! ( (entry_ptr->header).is_pinned ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "entry to be resized pinned is not pinned.";
+
+ } else {
+
+ mark_pinned_entry_dirty2(cache_ptr, type, idx,
+ TRUE, new_size);
+ }
+ }
+ } else {
+
+ protect_entry2(cache_ptr, type, idx);
+ unprotect_entry_with_size_change2(cache_ptr, type, idx,
+ H5C2__SIZE_CHANGED_FLAG,
+ new_size);
+ }
+ }
+
+ return;
+
+} /* resize_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_clean2
+ *
+ * Purpose: Verify that all cache entries are marked as clean. If any
+ * are not, set pass2 to FALSE.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+verify_clean2(void)
+
+{
+ int i;
+ int j;
+ int dirty_count = 0;
+ int32_t max_index;
+ test_entry_t * base_addr;
+
+ if ( pass2 ) {
+
+ for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+ {
+ max_index = max_indices2[i];
+ base_addr = entries2[i];
+
+ HDassert( base_addr );
+
+ for ( j = 0; j <= max_index; j++ )
+ {
+ if ( ( base_addr[j].header.is_dirty ) ||
+ ( base_addr[j].is_dirty ) ) {
+
+ dirty_count++;
+ }
+ }
+ }
+
+ if ( dirty_count > 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "verify_clean2() found dirty entry(s).";
+ }
+ }
+
+ return;
+
+} /* verify_clean2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_entry_status2
+ *
+ * Purpose: Verify that a list of entries have the expected status.
+ * If any discrepancies are found, set the failure message
+ * and set pass2 to FALSE.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/8/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+verify_entry_status2(H5C2_t * cache_ptr,
+ int tag,
+ int num_entries,
+ struct expected_entry_status expected[])
+{
+ const char * fcn_name = "verify_entry_status2()";
+ static char msg[128];
+ hbool_t in_cache = FALSE; /* will set to TRUE if necessary */
+ int i;
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ i = 0;
+ while ( ( pass2 ) && ( i < num_entries ) )
+ {
+ base_addr = entries2[expected[i].entry_type];
+ entry_ptr = &(base_addr[expected[i].entry_index]);
+
+ if ( ( ! expected[i].in_cache ) &&
+ ( ( expected[i].is_dirty ) ||
+ ( expected[i].is_protected ) ||
+ ( expected[i].is_pinned ) ) ) {
+
+ pass2 = FALSE;
+ sprintf(msg, "%d: Contradictory data in expected[%d].\n", tag, i);
+ failure_mssg2 = msg;
+ }
+
+ if ( pass2 ) {
+
+ in_cache = entry_in_cache2(cache_ptr, expected[i].entry_type,
+ expected[i].entry_index);
+
+ if ( in_cache != expected[i].in_cache ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) in cache actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)in_cache,
+ (int)expected[i].in_cache);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( entry_ptr->size != expected[i].size ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) size actualexpected = %ld/%ld.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (long)(entry_ptr->size),
+ (long)expected[i].size);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( ( pass2 ) && ( in_cache ) ) {
+
+ if ( entry_ptr->header.size != expected[i].size ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header size actual/expected = %ld/%ld.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (long)(entry_ptr->header.size),
+ (long)expected[i].size);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( entry_ptr->at_main_addr != expected[i].at_main_addr ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) at main addr actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->at_main_addr),
+ (int)expected[i].at_main_addr);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( entry_ptr->is_dirty != expected[i].is_dirty ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) is_dirty actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->is_dirty),
+ (int)expected[i].is_dirty);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( ( pass2 ) && ( in_cache ) ) {
+
+ if ( entry_ptr->header.is_dirty != expected[i].is_dirty ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header is_dirty actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_dirty),
+ (int)expected[i].is_dirty);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( entry_ptr->is_protected != expected[i].is_protected ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) is_protected actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->is_protected),
+ (int)expected[i].is_protected);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( ( pass2 ) && ( in_cache ) ) {
+
+ if ( entry_ptr->header.is_protected != expected[i].is_protected ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header is_protected actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_protected),
+ (int)expected[i].is_protected);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( entry_ptr->is_pinned != expected[i].is_pinned ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) is_pinned actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->is_pinned),
+ (int)expected[i].is_pinned);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( ( pass2 ) && ( in_cache ) ) {
+
+ if ( entry_ptr->header.is_pinned != expected[i].is_pinned ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header is_pinned actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_pinned),
+ (int)expected[i].is_pinned);
+ failure_mssg2 = msg;
+ }
+ }
+
+ if ( pass2 ) {
+
+ if ( ( entry_ptr->deserialized != expected[i].deserialized ) ||
+ ( entry_ptr->cleared != expected[i].cleared ) ||
+ ( entry_ptr->serialized != expected[i].serialized ) ||
+ ( entry_ptr->destroyed != expected[i].destroyed ) ) {
+
+ pass2 = FALSE;
+ sprintf(msg,
+ "%d entry (%d,%d) deserialized = %d(%d), clrd = %d(%d), serialized = %d(%d), dest = %d(%d)\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->deserialized),
+ (int)(expected[i].deserialized),
+ (int)(entry_ptr->cleared),
+ (int)(expected[i].cleared),
+ (int)(entry_ptr->serialized),
+ (int)(expected[i].serialized),
+ (int)(entry_ptr->destroyed),
+ (int)(expected[i].destroyed));
+ failure_mssg2 = msg;
+ }
+ }
+ i++;
+ } /* while */
+
+ return;
+
+} /* verify_entry_status2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_unprotected2
+ *
+ * Purpose: Verify that no cache entries are marked as protected. If
+ * any are, set pass2 to FALSE.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+verify_unprotected2(void)
+
+{
+ int i;
+ int j;
+ int protected_count = 0;
+ int32_t max_index;
+ test_entry_t * base_addr;
+
+ if ( pass2 ) {
+
+ for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+ {
+ max_index = max_indices2[i];
+ base_addr = entries2[i];
+
+ HDassert( base_addr );
+
+ for ( j = 0; j <= max_index; j++ )
+ {
+ HDassert( base_addr[j].header.is_protected ==
+ base_addr[j].is_protected );
+
+ if ( ( base_addr[j].header.is_protected ) ||
+ ( base_addr[j].is_protected ) ) {
+
+ protected_count++;
+ }
+ }
+ }
+
+ if ( protected_count > 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "verify_unprotected2() found protected entry(s).";
+ }
+ }
+
+ return;
+
+} /* verify_unprotected2() */
+
+
+/*****************************************************************************
+ *
+ * Function: setup_cache2()
+ *
+ * Purpose: Open an HDF file. This will allocate an instance and
+ * initialize an associated instance of H5C2_t. However,
+ * we want to test an instance of H5C2_t, so allocate and
+ * initialize one with the file ID returned by the call to
+ * H5Fcreate(). Return a pointer to this instance of H5C2_t.
+ *
+ * Observe that we open a HDF file because the cache now
+ * writes directly to file, and we need the file I/O facilities
+ * associated with the file.
+ *
+ * To avoid tripping on error check code, must allocate enough
+ * space in the file to hold all the test entries and their
+ * alternates. This is a little sticky, as the addresses of
+ * all the test entries are determined at compile time.
+ *
+ * Deal with this by choosing BASE_ADDR large enough that
+ * the base address of the allocate space will be less than
+ * or equal to BASE_ADDR, and then requesting an extra BASE_ADDR
+ * bytes, so we don't have to worry about exceeding the allocation.
+ *
+ * Return: Success: Ptr to H5C2_t
+ *
+ * Failure: NULL
+ *
+ * Programmer: JRM -- 9/13/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+H5C2_t *
+setup_cache2(size_t max_cache_size,
+ size_t min_clean_size)
+{
+ const char * fcn_name = "setup_cache2()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = TRUE;
+ int mile_stone = 1;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+ H5C2_t * ret_val = NULL;
+ haddr_t actual_base_addr;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+#if 0 /* This debugging code is useful from time to time -- keep it for now */
+ HDfprintf(stdout, "PICO_BASE_ADDR = 0x%lx, PICO_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)PICO_BASE_ADDR, (long)PICO_ALT_BASE_ADDR);
+ HDfprintf(stdout, "NANO_BASE_ADDR = 0x%lx, NANO_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)NANO_BASE_ADDR, (long)NANO_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "MICRO_BASE_ADDR = 0x%lx, MICRO_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)MICRO_BASE_ADDR, (long)MICRO_ALT_BASE_ADDR);
+ HDfprintf(stdout, "TINY_BASE_ADDR = 0x%lx, TINY_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)TINY_BASE_ADDR, (long)TINY_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "SMALL_BASE_ADDR = 0x%lx, SMALL_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)SMALL_BASE_ADDR, (long)SMALL_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "MEDIUM_BASE_ADDR = 0x%lx, MEDIUM_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)MEDIUM_BASE_ADDR, (long)MEDIUM_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "LARGE_BASE_ADDR = 0x%lx, LARGE_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)LARGE_BASE_ADDR, (long)LARGE_ALT_BASE_ADDR);
+ HDfprintf(stdout, "HUGE_BASE_ADDR = 0x%lx, HUGE_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)HUGE_BASE_ADDR, (long)HUGE_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "MONSTER_BASE_ADDR = 0x%lx, MONSTER_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)MONSTER_BASE_ADDR, (long)MONSTER_ALT_BASE_ADDR);
+ HDfprintf(stdout,
+ "VARIABLE_BASE_ADDR = 0x%lx, VARIABLE_ALT_BASE_ADDR = 0x%lx.\n",
+ (long)VARIABLE_BASE_ADDR, (long)VARIABLE_ALT_BASE_ADDR);
+#endif /* JRM */
+
+ /* setup the file name */
+ if ( pass2 ) {
+
+ if ( h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( fid < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fcreate() failed.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: H5Fcreate() failed.\n", fcn_name);
+ }
+
+ } else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5Fflush() failed.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: H5Fflush() failed.\n", fcn_name);
+ }
+
+ } else {
+
+ saved_fid = fid;
+ file_ptr = H5I_object_verify(fid, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Can't get file_ptr.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: H5Fflush() failed.\n", fcn_name);
+ }
+ }
+ }
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ cache_ptr = H5C2_create(file_ptr,
+ max_cache_size,
+ min_clean_size,
+ (NUMBER_OF_ENTRY_TYPES - 1),
+ (const char **)entry_type_names2,
+ check_write_permitted2,
+ TRUE,
+ NULL,
+ NULL);
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ if ( cache_ptr == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5C2_create() failed.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: H5C2_create() failed.\n", fcn_name);
+ }
+
+ } else if ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "Bad cache_ptr magic.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: Bad cache_ptr magic.\n", fcn_name);
+ }
+ }
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) { /* allocate space for test entries */
+
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT,
+ (hsize_t)(ADDR_SPACE_SIZE + BASE_ADDR));
+
+ if ( actual_base_addr == HADDR_UNDEF ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "H5MF_alloc() failed.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: H5MF_alloc() failed.\n", fcn_name);
+ }
+
+ } else if ( actual_base_addr > BASE_ADDR ) {
+
+ /* If this happens, must increase BASE_ADDR so that the
+ * actual_base_addr is <= BASE_ADDR. This should only happen
+ * if the size of the superblock is increase.
+ */
+ pass2 = FALSE;
+ failure_mssg2 = "actual_base_addr > BASE_ADDR";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: actual_base_addr > BASE_ADDR.\n",
+ fcn_name);
+ }
+ }
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ if ( pass2 ) {
+
+ H5C2_stats__reset(cache_ptr);
+ ret_val = cache_ptr;
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass2 = %d\n",
+ fcn_name, mile_stone++, (int)pass2);
+
+ return(ret_val);
+
+} /* setup_cache2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: takedown_cache2()
+ *
+ * Purpose: Flush the specified cache and destroy it. If requested,
+ * dump stats first. Then close and delete the associate
+ * file.
+ *
+ * If pass2 is FALSE, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/07
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+takedown_cache2(H5C2_t * cache_ptr,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats)
+{
+ char filename[512];
+
+ if ( cache_ptr != NULL ) {
+
+ if ( dump_stats ) {
+
+ H5C2_stats(cache_ptr, "test cache", dump_detailed_stats);
+ }
+
+ flush_cache2(cache_ptr, TRUE, FALSE, FALSE);
+
+ H5C2_dest(cache_ptr, H5P_DATASET_XFER_DEFAULT);
+
+ }
+
+ if ( saved_fid != -1 ) {
+
+ if ( H5Fclose(saved_fid) < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "couldn't close test file.";
+
+ } else {
+
+ saved_fid = -1;
+
+ }
+
+ if ( h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "h5_fixname() failed.\n";
+ }
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "couldn't delete test file.";
+
+ }
+ }
+
+ return;
+
+} /* takedown_cache2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: expunge_entry2()
+ *
+ * Purpose: Expunge the entry indicated by the type and index.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/6/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+expunge_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ /* const char * fcn_name = "expunge_entry2()"; */
+ herr_t result;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ if ( pass2 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->cache_ptr == cache_ptr );
+ HDassert( ! ( entry_ptr->header.is_protected ) );
+ HDassert( ! ( entry_ptr->is_protected ) );
+ HDassert( ! ( entry_ptr->header.is_pinned ) );
+ HDassert( ! ( entry_ptr->is_pinned ) );
+
+ result = H5C2_expunge_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[type]),
+ entry_ptr->addr);
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "error in H5C2_expunge_entry().";
+
+ }
+ }
+
+ return;
+
+} /* expunge_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: flush_cache2()
+ *
+ * Purpose: Flush the specified cache, destroying all entries if
+ requested. If requested, dump stats first.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/23/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+flush_cache2(H5C2_t * cache_ptr,
+ hbool_t destroy_entries,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats)
+{
+ const char * fcn_name = "flush_cache2()";
+ hbool_t show_progress = FALSE;
+ herr_t result = 0;
+ int mile_post = 0;
+
+ HDassert(cache_ptr);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: mile_post = %d.\n",
+ fcn_name, mile_post++); /* 0 */
+ }
+
+ verify_unprotected2();
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: mile_post = %d.\n",
+ fcn_name, mile_post++); /* 1 */
+ }
+
+ if ( pass2 ) {
+
+ if ( destroy_entries ) {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__FLUSH_INVALIDATE_FLAG);
+
+ } else {
+
+ result = H5C2_flush_cache(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ H5C2__NO_FLAGS_SET);
+ }
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: mile_post = %d.\n",
+ fcn_name, mile_post++); /* 2 */
+ }
+
+ if ( dump_stats ) {
+
+ H5C2_stats(cache_ptr, "test cache", dump_detailed_stats);
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: mile_post = %d.\n",
+ fcn_name, mile_post++); /* 3 */
+ }
+
+ if ( result < 0 ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "error in H5C2_flush_cache().";
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: mile_post = %d.\n",
+ fcn_name, mile_post++); /* 4 */
+ }
+
+ return;
+
+} /* flush_cache2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: insert_entry2()
+ *
+ * Purpose: Insert the entry indicated by the type and index. Mark
+ * it clean or dirty as indicated.
+ *
+ * Note that I don't see much practical use for inserting
+ * a clean entry, but the interface permits it so we should
+ * test it.
+ *
+ * Do nothing if pass2 is false.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/16/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/13/05
+ * Updated function for the flags parameter in
+ * H5C2_insert_entry(), and to allow access to this parameter.
+ *
+ * JRM -- 6/17/05
+ * The interface no longer permits clean inserts.
+ * Accordingly, the dirty parameter is no longer meaningfull.
+ *
+ * JRM -- 4/5/06
+ * Added code to initialize the new cache_ptr field of the
+ * test_entry_t structure.
+ *
+ * JRM -- 8/10/06
+ * Updated to reflect the fact that entries can now be
+ * inserted pinned.
+ *
+ *-------------------------------------------------------------------------
+ */
+
void
insert_entry2(H5C2_t * cache_ptr,
              int32_t type,
              int32_t idx,
              hbool_t UNUSED dirty,
              unsigned int flags)
{
    herr_t result;
    hbool_t insert_pinned;
    test_entry_t * base_addr;
    test_entry_t * entry_ptr;

    /* do nothing once a previous failure has been recorded */
    if ( pass2 ) {

        HDassert( cache_ptr );
        HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
        HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );

        base_addr = entries2[type];
        entry_ptr = &(base_addr[idx]);

        /* sanity check the target test entry */
        HDassert( entry_ptr->index == idx );
        HDassert( entry_ptr->type == type );
        HDassert( entry_ptr == entry_ptr->self );
        HDassert( !(entry_ptr->is_protected) );

        /* caller may request that the entry be inserted pinned */
        insert_pinned = ((flags & H5C2__PIN_ENTRY_FLAG) != 0 );

        /* the interface no longer supports clean inserts, so the
         * dirty parameter is ignored -- entries are always dirty
         * on insertion.
         */
        entry_ptr->is_dirty = TRUE;

        result = H5C2_insert_entry(cache_ptr, H5P_DATASET_XFER_DEFAULT,
                                   &(types2[type]), entry_ptr->addr,
                                   entry_ptr->size, (void *)entry_ptr, flags);

        /* verify both the return code and that the cache header now
         * mirrors the test entry's records.
         */
        if ( ( result < 0 ) ||
             ( entry_ptr->header.is_protected ) ||
             ( entry_ptr->header.type != &(types2[type]) ) ||
             ( entry_ptr->size != entry_ptr->header.size ) ||
             ( entry_ptr->addr != entry_ptr->header.addr ) ) {

            pass2 = FALSE;
            failure_mssg2 = "error in H5C2_insert().";

#if 0 /* This is useful debugging code.  Lets keep it around. */

            HDfprintf(stdout, "result = %d\n", (int)result);
            HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n",
                      (int)(entry_ptr->header.is_protected));
            HDfprintf(stdout,
                      "entry_ptr->header.type != &(types2[type]) = %d\n",
                      (int)(entry_ptr->header.type != &(types2[type])));
            HDfprintf(stdout,
                      "entry_ptr->size != entry_ptr->header.size = %d\n",
                      (int)(entry_ptr->size != entry_ptr->header.size));
            HDfprintf(stdout,
                      "entry_ptr->addr != entry_ptr->header.addr = %d\n",
                      (int)(entry_ptr->addr != entry_ptr->header.addr));
#endif
        }
        /* NOTE(review): the bookkeeping and asserts below run even when
         * the insert just failed above -- presumably acceptable in a
         * test harness since HDassert aborts anyway, but confirm.
         */
        HDassert( entry_ptr->cache_ptr == NULL );

        /* record which cache now owns this entry */
        entry_ptr->cache_ptr = cache_ptr;

        /* mirror the pinned status into the test entry's records */
        if ( insert_pinned ) {

            HDassert( entry_ptr->header.is_pinned );
            entry_ptr->is_pinned = TRUE;

        } else {

            HDassert( ! ( entry_ptr->header.is_pinned ) );
            entry_ptr->is_pinned = FALSE;

        }
        HDassert( entry_ptr->header.is_dirty );
        HDassert( ((entry_ptr->header).type)->id == type );
    }

    return;

} /* insert_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: mark_pinned_entry_dirty2()
+ *
+ * Purpose: Mark the specified entry as dirty.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/28/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+mark_pinned_entry_dirty2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t size_changed,
+ size_t new_size)
+{
+ /* const char * fcn_name = "mark_pinned_entry_dirty2()"; */
+ herr_t result;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ if ( pass2 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->cache_ptr == cache_ptr );
+ HDassert( ! (entry_ptr->header.is_protected) );
+ HDassert( entry_ptr->header.is_pinned );
+ HDassert( entry_ptr->is_pinned );
+
+ entry_ptr->is_dirty = TRUE;
+
+ result = H5C2_mark_pinned_entry_dirty(cache_ptr,
+ (void *)entry_ptr,
+ size_changed,
+ new_size);
+
+ if ( ( result < 0 ) ||
+ ( ! (entry_ptr->header.is_dirty) ) ||
+ ( ! (entry_ptr->header.is_pinned) ) ||
+ ( entry_ptr->header.type != &(types2[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "error in H5C2_mark_pinned_entry_dirty().";
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+
+ }
+
+ return;
+
+} /* mark_pinned_entry_dirty2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: mark_pinned_or_protected_entry_dirty2()
+ *
+ * Purpose: Mark the specified entry as dirty.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 5/17/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
void
mark_pinned_or_protected_entry_dirty2(H5C2_t * cache_ptr,
                                      int32_t type,
                                      int32_t idx)
{
    /* const char * fcn_name = "mark_pinned_or_protected_entry_dirty2()"; */
    herr_t result;
    test_entry_t * base_addr;
    test_entry_t * entry_ptr;

    /* do nothing once a previous failure has been recorded */
    if ( pass2 ) {

        HDassert( cache_ptr );
        HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
        HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );

        base_addr = entries2[type];
        entry_ptr = &(base_addr[idx]);

        /* the target entry must be self-consistent, owned by this
         * cache, and either protected or pinned -- the H5C2 call below
         * is defined only for those two states.
         */
        HDassert( entry_ptr->index == idx );
        HDassert( entry_ptr->type == type );
        HDassert( entry_ptr == entry_ptr->self );
        HDassert( entry_ptr->cache_ptr == cache_ptr );
        HDassert( entry_ptr->header.is_protected ||
                  entry_ptr->header.is_pinned );

        entry_ptr->is_dirty = TRUE;

        result = H5C2_mark_pinned_or_protected_entry_dirty(cache_ptr,
                                                           (void *)entry_ptr);

        /* Record a failure if any of the following hold:
         *   - the call itself reported an error;
         *   - the entry is now neither protected nor pinned;
         *   - the entry is protected but not flagged as dirtied
         *     (protected entries record the dirty via the "dirtied"
         *     flag until unprotect time);
         *   - the entry is not protected (i.e. pinned) but its header
         *     is not dirty;
         *   - the header's type / size / addr no longer agree with the
         *     test entry's records.
         */
        if ( ( result < 0 )
             ||
             ( ( ! (entry_ptr->header.is_protected) )
               &&
               ( ! (entry_ptr->header.is_pinned) )
             )
             ||
             ( ( entry_ptr->header.is_protected )
               &&
               ( ! ( entry_ptr->header.dirtied ) )
             )
             ||
             ( ( ! ( entry_ptr->header.is_protected ) )
               &&
               ( ! ( entry_ptr->header.is_dirty ) )
             )
             ||
             ( entry_ptr->header.type != &(types2[type]) )
             ||
             ( entry_ptr->size != entry_ptr->header.size )
             ||
             ( entry_ptr->addr != entry_ptr->header.addr ) ) {

            pass2 = FALSE;
            failure_mssg2 =
                "error in H5C2_mark_pinned_or_protected_entry_dirty().";

        }

        HDassert( ((entry_ptr->header).type)->id == type );

    }

    return;

} /* mark_pinned_or_protected_entry_dirty2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: rename_entry2()
+ *
+ * Purpose: Rename the entry indicated by the type and index to its
+ * main or alternate address as indicated. If the entry is
+ * already at the desired entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/21/04
+ *
+ * Modifications:
+ *
+ * JRM -- 6/17/05
+ * Updated code to reflect the fact that renames automatically
+ * dirty entries.
+ *
+ *-------------------------------------------------------------------------
+ */
+
void
rename_entry2(H5C2_t * cache_ptr,
              int32_t type,
              int32_t idx,
              hbool_t main_addr)
{
    herr_t result;
    hbool_t done = TRUE; /* will set to FALSE if we have work to do */
    haddr_t old_addr = HADDR_UNDEF;
    haddr_t new_addr = HADDR_UNDEF;
    test_entry_t * base_addr;
    test_entry_t * entry_ptr;

    /* NOTE(review): unlike most helpers in this file, this function does
     * not consult pass2 before doing work -- confirm this is intended.
     */
    HDassert( cache_ptr );
    HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
    HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );

    base_addr = entries2[type];
    entry_ptr = &(base_addr[idx]);

    /* the target entry must be self-consistent, owned by this cache,
     * and not protected.
     */
    HDassert( entry_ptr->index == idx );
    HDassert( entry_ptr->type == type );
    HDassert( entry_ptr == entry_ptr->self );
    HDassert( entry_ptr->cache_ptr == cache_ptr );
    HDassert( !(entry_ptr->is_protected) );
    HDassert( !(entry_ptr->header.is_protected) );


    /* decide whether a rename is actually needed; done stays TRUE (and
     * result is never set or read) when the entry is already at the
     * requested address.
     */
    if ( entry_ptr->at_main_addr && !main_addr ) {

        /* rename to alt addr */

        HDassert( entry_ptr->addr == entry_ptr->main_addr );

        done = FALSE;
        old_addr = entry_ptr->addr;
        new_addr = entry_ptr->alt_addr;

    } else if ( !(entry_ptr->at_main_addr) && main_addr ) {

        /* rename to main addr */

        HDassert( entry_ptr->addr == entry_ptr->alt_addr );

        done = FALSE;
        old_addr = entry_ptr->addr;
        new_addr = entry_ptr->main_addr;
    }

    if ( ! done ) {

        /* renames dirty the entry as a side effect */
        entry_ptr->is_dirty = TRUE;

        result = H5C2_rename_entry(cache_ptr, &(types2[type]),
                                   old_addr, new_addr);
    }

    if ( ! done ) {

        /* the header address check is skipped when the entry is being
         * destroyed, as the rename may then legitimately be a no-op.
         */
        if ( ( result < 0 ) ||
             ( ( ! ( entry_ptr->header.destroy_in_progress ) ) &&
               ( entry_ptr->header.addr != new_addr ) ) ) {

            pass2 = FALSE;
            failure_mssg2 = "error in H5C2_rename_entry().";

        } else {

            /* update our records to match the new location */
            entry_ptr->addr = new_addr;
            entry_ptr->at_main_addr = main_addr;
        }
    }

    HDassert( ((entry_ptr->header).type)->id == type );

    /* NOTE(review): these asserts run even when no rename was performed
     * (done == TRUE); they appear to assume the entry was already dirty
     * in that case -- confirm against callers.
     */
    HDassert( entry_ptr->header.is_dirty );
    HDassert( entry_ptr->is_dirty );

    return;

} /* rename_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: protect_entry2()
+ *
+ * Purpose: Protect the entry indicated by the type and index.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ * - Modified call to H5C2_protect to pass H5C2__NO_FLAGS_SET in the
+ * new flags parameter.
+ * JRM -- 3/28/07
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+protect_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ const char * fcn_name = "protect_entry2()";
+ hbool_t verbose = FALSE;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ H5C2_cache_entry_t * cache_entry_ptr;
+
+ if ( verbose ) {
+ HDfprintf(stdout, "\n%s: entering. type = %d, idx = %d.\n",
+ fcn_name, type, idx);
+ }
+
+ if ( pass2 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( !(entry_ptr->is_protected) );
+
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%s: calling H5C2_protect(). addr = 0x%lx, len = %ld.\n",
+ fcn_name, (long)(entry_ptr->addr),
+ (long)(entry_ptr->size));
+ }
+
+ cache_entry_ptr = H5C2_protect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[type]), entry_ptr->addr,
+ entry_ptr->size, NULL,
+ H5C2__NO_FLAGS_SET);
+
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%s: H5C2_protect() returns. addr = 0x%lx, len = %ld.\n",
+ fcn_name, (long)(entry_ptr->addr),
+ (long)(entry_ptr->size));
+ }
+
+ if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
+ ( !(entry_ptr->header.is_protected) ) ||
+ ( entry_ptr->header.type != &(types2[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+#if 0
+ /* I've written the following debugging code several times
+ * now. Lets keep it around so I don't have to write it
+ * again.
+ * - JRM
+ */
+ HDfprintf(stdout, "( cache_entry_ptr != (void *)entry_ptr ) = %d\n",
+ (int)( cache_entry_ptr != (void *)entry_ptr ));
+ HDfprintf(stdout, "cache_entry_ptr = 0x%lx, entry_ptr = 0x%lx\n",
+ (long)cache_entry_ptr, (long)entry_ptr);
+ HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n",
+ (int)(entry_ptr->header.is_protected));
+ HDfprintf(stdout,
+ "( entry_ptr->header.type != &(types2[type]) ) = %d\n",
+ (int)( entry_ptr->header.type != &(types2[type]) ));
+ HDfprintf(stdout,
+ "entry_ptr->size = %d, entry_ptr->header.size = %d\n",
+ (int)(entry_ptr->size), (int)(entry_ptr->header.size));
+ HDfprintf(stdout,
+ "entry_ptr->addr = %d, entry_ptr->header.addr = %d\n",
+ (int)(entry_ptr->addr), (int)(entry_ptr->header.addr));
+#endif
+ pass2 = FALSE;
+ failure_mssg2 = "error in H5C2_protect().";
+
+ } else {
+
+ HDassert( ( entry_ptr->cache_ptr == NULL ) ||
+ ( entry_ptr->cache_ptr == cache_ptr ) );
+
+ entry_ptr->cache_ptr = cache_ptr;
+ entry_ptr->is_protected = TRUE;
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: exiting.\n", fcn_name);
+ }
+
+ return;
+
+} /* protect_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: protect_entry_ro2()
+ *
+ * Purpose: Do a read only protect the entry indicated by the type
+ * and index.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/1/07
+ *
+ * Modifications:
+ *
+ * - None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+protect_entry_ro2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ /* const char * fcn_name = "protect_entry_ro2()"; */
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ H5C2_cache_entry_t * cache_entry_ptr;
+
+ if ( pass2 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( ( ! ( entry_ptr->is_protected ) ) ||
+ ( ( entry_ptr->is_read_only ) &&
+ ( entry_ptr->ro_ref_count > 0 ) ) );
+
+ cache_entry_ptr = H5C2_protect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types2[type]), entry_ptr->addr,
+ entry_ptr->size, NULL,
+ H5C2__READ_ONLY_FLAG);
+
+ if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
+ ( !(entry_ptr->header.is_protected) ) ||
+ ( !(entry_ptr->header.is_read_only) ) ||
+ ( entry_ptr->header.ro_ref_count <= 0 ) ||
+ ( entry_ptr->header.type != &(types2[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "error in read only H5C2_protect().";
+
+ } else {
+
+ HDassert( ( entry_ptr->cache_ptr == NULL ) ||
+ ( entry_ptr->cache_ptr == cache_ptr ) );
+
+ entry_ptr->cache_ptr = cache_ptr;
+ entry_ptr->is_protected = TRUE;
+ entry_ptr->is_read_only = TRUE;
+ entry_ptr->ro_ref_count++;
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ return;
+
+} /* protect_entry_ro2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: unpin_entry2()
+ *
+ * Purpose: Unpin the entry indicated by the type and index.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/28/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+unpin_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ /* const char * fcn_name = "unpin_entry2()"; */
+ herr_t result;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ if ( pass2 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
+
+ base_addr = entries2[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->cache_ptr == cache_ptr );
+ HDassert( ! (entry_ptr->header.is_protected) );
+ HDassert( entry_ptr->header.is_pinned );
+ HDassert( entry_ptr->is_pinned );
+
+ result = H5C2_unpin_entry(cache_ptr, (void *)entry_ptr);
+
+ if ( ( result < 0 ) ||
+ ( entry_ptr->header.is_pinned ) ||
+ ( entry_ptr->header.type != &(types2[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass2 = FALSE;
+ failure_mssg2 = "error in H5C2_unpin().";
+
+ }
+
+ entry_ptr->is_pinned = FALSE;
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+
+ }
+
+ return;
+
+} /* unpin_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: unprotect_entry2()
+ *
+ * Purpose: Unprotect the entry indicated by the type and index.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/7/05
+ * Updated for the replacement of the deleted parameter in
+ * H5C2_unprotect() with the new flags parameter.
+ *
+ * JRM - 6/17/05
+ * Modified function to use the new dirtied parameter of
+ * H5C2_unprotect().
+ *
+ * JRM -- 9/8/05
+ * Update for new entry size parameter in H5C2_unprotect().
+ * We don't use them here for now.
+ *
+ * JRM -- 3/31/06
+ * Update for pinned entries.
+ *
+ * JRM -- 4/1/07
+ * Updated for new multiple read protects.
+ *
+ *-------------------------------------------------------------------------
+ */
+
/* Unprotect the indicated entry, keeping the test code's shadow state
 * (is_protected / is_read_only / ro_ref_count / is_dirty / is_pinned)
 * in sync with the cache's view, and verifying the two agree on exit.
 *
 * Note that dirty is an int, not an hbool_t: callers may pass NO_CHANGE
 * (neither TRUE nor FALSE) to leave the entry's dirty state untouched.
 */
void
unprotect_entry2(H5C2_t * cache_ptr,
                 int32_t type,
                 int32_t idx,
                 int dirty,
                 unsigned int flags)
{
    const char * fcn_name = "unprotect_entry2()";
    herr_t result;
    hbool_t verbose = FALSE;
    hbool_t pin_flag_set;
    hbool_t unpin_flag_set;
    test_entry_t * base_addr;
    test_entry_t * entry_ptr;

    if ( verbose ) {
        HDfprintf(stdout,
            "\n%s: entering. type = %d, idx = %d, dirty = %d, flags = %0x.\n",
            fcn_name, type, idx, (int)dirty, (int)flags);
    }

    if ( pass2 ) {

        /* sanity check the request, then locate the target test entry */
        HDassert( cache_ptr );
        HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
        HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );

        base_addr = entries2[type];
        entry_ptr = &(base_addr[idx]);

        HDassert( entry_ptr->index == idx );
        HDassert( entry_ptr->type == type );
        HDassert( entry_ptr == entry_ptr->self );
        HDassert( entry_ptr->cache_ptr == cache_ptr );
        HDassert( entry_ptr->header.is_protected );
        HDassert( entry_ptr->is_protected );

        pin_flag_set = ((flags & H5C2__PIN_ENTRY_FLAG) != 0 );
        unpin_flag_set = ((flags & H5C2__UNPIN_ENTRY_FLAG) != 0 );

        /* pin and unpin are mutually exclusive, and each must be
         * consistent with the entry's current pin state.
         */
        HDassert ( ! ( pin_flag_set && unpin_flag_set ) );
        HDassert ( ( ! pin_flag_set ) || ( ! (entry_ptr->is_pinned) ) );
        HDassert ( ( ! unpin_flag_set ) || ( entry_ptr->is_pinned ) );

        /* fold the dirty parameter into flags -- skipped when dirty is
         * NO_CHANGE (neither TRUE nor FALSE).
         */
        if ( ( dirty == TRUE ) || ( dirty == FALSE ) ) {

            flags |= (dirty ? H5C2__DIRTIED_FLAG : H5C2__NO_FLAGS_SET);
            entry_ptr->is_dirty = (entry_ptr->is_dirty || dirty);
        }

        if ( verbose ) {
            HDfprintf(stdout, "%s: calling H5C2_unprotect(). addr = 0X%lx.\n",
                      fcn_name, (long)(entry_ptr->addr));
        }

        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
                                &(types2[type]), entry_ptr->addr,
                                (void *)entry_ptr, flags, (size_t)0);

        if ( verbose ) {
            HDfprintf(stdout, "%s: H5C2_unprotect() returns. addr = 0X%lx.\n",
                      fcn_name, (long)(entry_ptr->addr));
        }


        /* the entry may legitimately still be protected after this call
         * if it carries additional read only protects.
         */
        if ( ( result < 0 ) ||
             ( ( entry_ptr->header.is_protected ) &&
               ( ( ! ( entry_ptr->is_read_only ) ) ||
                 ( entry_ptr->ro_ref_count <= 0 ) ) ) ||
             ( entry_ptr->header.type != &(types2[type]) ) ||
             ( entry_ptr->size != entry_ptr->header.size ) ||
             ( entry_ptr->addr != entry_ptr->header.addr ) ) {

            pass2 = FALSE;
            failure_mssg2 = "error in H5C2_unprotect().";

        }
        else
        {
            /* update the shadow read only reference count: drop one
             * reference, clearing the protected / read only state only
             * when the last reference is released.  ro_ref_count == 0
             * means this was a plain (read/write) protect.
             */
            if ( entry_ptr->ro_ref_count > 1 ) {

                entry_ptr->ro_ref_count--;

            } else if ( entry_ptr->ro_ref_count == 1 ) {

                entry_ptr->is_protected = FALSE;
                entry_ptr->is_read_only = FALSE;
                entry_ptr->ro_ref_count = 0;

            } else {

                entry_ptr->is_protected = FALSE;

            }

            if ( pin_flag_set ) {

                HDassert ( entry_ptr->header.is_pinned );
                entry_ptr->is_pinned = TRUE;

            } else if ( unpin_flag_set ) {

                HDassert ( ! ( entry_ptr->header.is_pinned ) );
                entry_ptr->is_pinned = FALSE;

            }
        }

        HDassert( ((entry_ptr->header).type)->id == type );

        /* a dirtied entry must show as dirty -- unless it was deleted,
         * in which case the header is no longer meaningful.
         */
        if ( ( flags & H5C2__DIRTIED_FLAG ) != 0
             && ( (flags & H5C2__DELETED_FLAG) == 0 ) ) {

            HDassert( entry_ptr->header.is_dirty );
            HDassert( entry_ptr->is_dirty );
        }

        /* on exit, the cache's view and the shadow state must agree */
        HDassert( entry_ptr->header.is_protected == entry_ptr->is_protected );
        HDassert( entry_ptr->header.is_read_only == entry_ptr->is_read_only );
        HDassert( entry_ptr->header.ro_ref_count == entry_ptr->ro_ref_count );
    }

    if ( verbose ) {
        HDfprintf(stdout, "\n%s: exiting.\n", fcn_name);
    }

    return;

} /* unprotect_entry2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: unprotect_entry_with_size_change2()
+ *
+ * Purpose: Version of unprotect_entry() that allow access to the new
+ * size change parameters in H5C2_unprotect_entry()
+ *
+ * At present, only the sizes of VARIABLE_ENTRY_TYPE entries
+ * can be changed. Thus this function will scream and die
+ * if the H5C2__SIZE_CHANGED_FLAG is set and the type is not
+ * VARIABLE_ENTRY_TYPE.
+ *
+ * Do nothing if pass2 is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 8/31/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
/* Unprotect the indicated entry, optionally resizing it via the
 * H5C2__SIZE_CHANGED_FLAG / new_size pair.  Only VARIABLE_ENTRY_TYPE
 * entries may be resized -- the assertions below enforce this.
 */
void
unprotect_entry_with_size_change2(H5C2_t * cache_ptr,
                                  int32_t type,
                                  int32_t idx,
                                  unsigned int flags,
                                  size_t new_size)
{
    /* const char * fcn_name = "unprotect_entry_with_size_change2()"; */
    herr_t result;
    hbool_t dirty_flag_set;
    hbool_t pin_flag_set;
    hbool_t unpin_flag_set;
    hbool_t size_changed_flag_set;
    test_entry_t * base_addr;
    test_entry_t * entry_ptr;

    if ( pass2 ) {

        /* sanity check the request, then locate the target test entry */
        HDassert( cache_ptr );
        HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
        HDassert( ( 0 <= idx ) && ( idx <= max_indices2[type] ) );
        HDassert( new_size <= entry_sizes2[type] );

        base_addr = entries2[type];
        entry_ptr = &(base_addr[idx]);

        HDassert( entry_ptr->index == idx );
        HDassert( entry_ptr->type == type );
        HDassert( entry_ptr == entry_ptr->self );
        HDassert( entry_ptr->cache_ptr == cache_ptr );
        HDassert( entry_ptr->header.is_protected );
        HDassert( entry_ptr->is_protected );

        dirty_flag_set = ((flags & H5C2__DIRTIED_FLAG) != 0 );
        pin_flag_set = ((flags & H5C2__PIN_ENTRY_FLAG) != 0 );
        unpin_flag_set = ((flags & H5C2__UNPIN_ENTRY_FLAG) != 0 );
        size_changed_flag_set = ((flags & H5C2__SIZE_CHANGED_FLAG) != 0 );

        /* pin and unpin are mutually exclusive; size changes require a
         * non-zero size and are only legal on variable size entries.
         */
        HDassert ( ! ( pin_flag_set && unpin_flag_set ) );
        HDassert ( ( ! pin_flag_set ) || ( ! (entry_ptr->is_pinned) ) );
        HDassert ( ( ! unpin_flag_set ) || ( entry_ptr->is_pinned ) );
        HDassert ( ( ! size_changed_flag_set ) || ( new_size > 0 ) );
        HDassert ( ( ! size_changed_flag_set ) ||
                   ( type == VARIABLE_ENTRY_TYPE ) );

        entry_ptr->is_dirty = (entry_ptr->is_dirty || dirty_flag_set);

        /* a size change implicitly dirties the entry */
        if ( size_changed_flag_set ) {

            entry_ptr->is_dirty = TRUE;
            entry_ptr->size = new_size;
        }

        result = H5C2_unprotect(cache_ptr, H5P_DATASET_XFER_DEFAULT,
                                &(types2[type]), entry_ptr->addr,
                                (void *)entry_ptr, flags, new_size);

        if ( ( result < 0 ) ||
             ( entry_ptr->header.is_protected ) ||
             ( entry_ptr->header.type != &(types2[type]) ) ||
             ( entry_ptr->size != entry_ptr->header.size ) ||
             ( entry_ptr->addr != entry_ptr->header.addr ) ) {

            pass2 = FALSE;
            failure_mssg2 = "error in H5C2_unprotect().";

        }
        else
        {
            entry_ptr->is_protected = FALSE;

            if ( pin_flag_set ) {

                HDassert ( entry_ptr->header.is_pinned );
                entry_ptr->is_pinned = TRUE;

            } else if ( unpin_flag_set ) {

                HDassert ( ! ( entry_ptr->header.is_pinned ) );
                entry_ptr->is_pinned = FALSE;

            }
        }

        HDassert( ((entry_ptr->header).type)->id == type );

        /* a dirtied entry must show as dirty -- unless it was deleted,
         * in which case the header is no longer meaningful.
         */
        if ( ( flags & H5C2__DIRTIED_FLAG ) != 0
             && ( (flags & H5C2__DELETED_FLAG) == 0 ) ) {

            HDassert( entry_ptr->header.is_dirty );
            HDassert( entry_ptr->is_dirty );
        }
    }

    return;

} /* unprotect_entry_with_size_change2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: row_major_scan_forward2()
+ *
+ * Purpose: Do a sequence of inserts, protects, unprotects, renames,
+ * destroys while scanning through the set of entries. If
+ * pass2 is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ * JRM -- 4/4/07
+ * Added code supporting multiple read only protects.
+ * Note that this increased the minimum lag to 10.
+ *
+ *-------------------------------------------------------------------------
+ */
+
void
row_major_scan_forward2(H5C2_t * cache_ptr,
                        int32_t lag,
                        hbool_t verbose,
                        hbool_t reset_stats,
                        hbool_t display_stats,
                        hbool_t display_detailed_stats,
                        hbool_t do_inserts,
                        hbool_t dirty_inserts,
                        hbool_t do_renames,
                        hbool_t rename_to_main_addr,
                        hbool_t do_destroys,
                        hbool_t do_mult_ro_protects,
                        int dirty_destroys,
                        int dirty_unprotects)
{
    const char * fcn_name = "row_major_scan_forward2";
    int32_t type;
    int32_t idx;

    if ( verbose )
        HDfprintf(stdout, "%s(): entering.\n", fcn_name);

    /* the read only protect / unprotect clauses below reach up to 9
     * entries behind the scan's leading edge -- hence the minimum lag.
     */
    HDassert( lag >= 10 );

    type = 0;

    if ( ( pass2 ) && ( reset_stats ) ) {

        H5C2_stats__reset(cache_ptr);
    }

    /* scan the entry types in ascending order; for each type, idx runs
     * from -lag to max_indices2[type] + lag so that the staggered
     * (idx +/- k) clauses below cover every index.  Each clause fires
     * only when its offset is a valid index and satisfies the clause's
     * modulus test; the number at the start of each verbose tag
     * identifies the clause that generated the output.
     */
    while ( ( pass2 ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
    {
        idx = -lag;

        while ( ( pass2 ) && ( idx <= (max_indices2[type] + lag) ) )
        {
            if ( verbose ) {

                HDfprintf(stdout, "%d:%d: ", type, idx);
            }

            if ( ( pass2 ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
                 ( (idx + lag) <= max_indices2[type] ) &&
                 ( ((idx + lag) % 2) == 0 ) &&
                 ( ! entry_in_cache2(cache_ptr, type, (idx + lag)) ) ) {

                if ( verbose )
                    HDfprintf(stdout, "1(i, %d, %d) ", type, (idx + lag));

                insert_entry2(cache_ptr, type, (idx + lag), dirty_inserts,
                              H5C2__NO_FLAGS_SET);
            }


            if ( ( pass2 ) && ( (idx + lag - 1) >= 0 ) &&
                 ( (idx + lag - 1) <= max_indices2[type] ) &&
                 ( ( (idx + lag - 1) % 3 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "2(p, %d, %d) ", type, (idx + lag - 1));

                protect_entry2(cache_ptr, type, (idx + lag - 1));
            }

            if ( ( pass2 ) && ( (idx + lag - 2) >= 0 ) &&
                 ( (idx + lag - 2) <= max_indices2[type] ) &&
                 ( ( (idx + lag - 2) % 3 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "3(u, %d, %d) ", type, (idx + lag - 2));

                unprotect_entry2(cache_ptr, type, idx+lag-2, NO_CHANGE,
                                 H5C2__NO_FLAGS_SET);
            }


            /* rename the entry just unprotected by clause 3 */
            if ( ( pass2 ) && ( do_renames ) && ( (idx + lag - 2) >= 0 ) &&
                 ( (idx + lag - 2) <= max_indices2[type] ) &&
                 ( ( (idx + lag - 2) % 3 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "4(r, %d, %d, %d) ",
                              type, (idx + lag - 2), (int)rename_to_main_addr);

                rename_entry2(cache_ptr, type, (idx + lag - 2),
                              rename_to_main_addr);
            }


            if ( ( pass2 ) && ( (idx + lag - 3) >= 0 ) &&
                 ( (idx + lag - 3) <= max_indices2[type] ) &&
                 ( ( (idx + lag - 3) % 5 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "5(p, %d, %d) ", type, (idx + lag - 3));

                protect_entry2(cache_ptr, type, (idx + lag - 3));
            }

            if ( ( pass2 ) && ( (idx + lag - 5) >= 0 ) &&
                 ( (idx + lag - 5) <= max_indices2[type] ) &&
                 ( ( (idx + lag - 5) % 5 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "6(u, %d, %d) ", type, (idx + lag - 5));

                unprotect_entry2(cache_ptr, type, idx+lag-5, NO_CHANGE,
                                 H5C2__NO_FLAGS_SET);
            }

            if ( do_mult_ro_protects )
            {
                /* the 9 / 11 / 13 moduli are relatively prime, so the
                 * read only protects (clauses 7-9) overlap irregularly
                 * -- entries will at times carry two or three
                 * concurrent read only protects.  Each protect is
                 * released two indices later (clauses 10-12).
                 */
                if ( ( pass2 ) && ( (idx + lag - 5) >= 0 ) &&
                     ( (idx + lag - 5) < max_indices2[type] ) &&
                     ( (idx + lag - 5) % 9 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "7(p-ro, %d, %d) ", type,
                                  (idx + lag - 5));

                    protect_entry_ro2(cache_ptr, type, (idx + lag - 5));
                }

                if ( ( pass2 ) && ( (idx + lag - 6) >= 0 ) &&
                     ( (idx + lag - 6) < max_indices2[type] ) &&
                     ( (idx + lag - 6) % 11 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "8(p-ro, %d, %d) ", type,
                                  (idx + lag - 6));

                    protect_entry_ro2(cache_ptr, type, (idx + lag - 6));
                }

                if ( ( pass2 ) && ( (idx + lag - 7) >= 0 ) &&
                     ( (idx + lag - 7) < max_indices2[type] ) &&
                     ( (idx + lag - 7) % 13 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "9(p-ro, %d, %d) ", type,
                                  (idx + lag - 7));

                    protect_entry_ro2(cache_ptr, type, (idx + lag - 7));
                }

                if ( ( pass2 ) && ( (idx + lag - 7) >= 0 ) &&
                     ( (idx + lag - 7) < max_indices2[type] ) &&
                     ( (idx + lag - 7) % 9 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "10(u-ro, %d, %d) ", type,
                                  (idx + lag - 7));

                    unprotect_entry2(cache_ptr, type, (idx + lag - 7),
                                     FALSE, H5C2__NO_FLAGS_SET);
                }

                if ( ( pass2 ) && ( (idx + lag - 8) >= 0 ) &&
                     ( (idx + lag - 8) < max_indices2[type] ) &&
                     ( (idx + lag - 8) % 11 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "11(u-ro, %d, %d) ", type,
                                  (idx + lag - 8));

                    unprotect_entry2(cache_ptr, type, (idx + lag - 8),
                                     FALSE, H5C2__NO_FLAGS_SET);
                }

                if ( ( pass2 ) && ( (idx + lag - 9) >= 0 ) &&
                     ( (idx + lag - 9) < max_indices2[type] ) &&
                     ( (idx + lag - 9) % 13 == 0 ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "12(u-ro, %d, %d) ", type,
                                  (idx + lag - 9));

                    unprotect_entry2(cache_ptr, type, (idx + lag - 9),
                                     FALSE, H5C2__NO_FLAGS_SET);
                }
            } /* if ( do_mult_ro_protects ) */

            /* protect the current index -- it is unprotected (and
             * possibly destroyed) lag indices later, below.
             */
            if ( ( pass2 ) && ( idx >= 0 ) && ( idx <= max_indices2[type] ) ) {

                if ( verbose )
                    HDfprintf(stdout, "13(p, %d, %d) ", type, idx);

                protect_entry2(cache_ptr, type, idx);
            }

            if ( ( pass2 ) && ( (idx - lag + 2) >= 0 ) &&
                 ( (idx - lag + 2) <= max_indices2[type] ) &&
                 ( ( (idx - lag + 2) % 7 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "14(u, %d, %d) ", type, (idx - lag + 2));

                unprotect_entry2(cache_ptr, type, idx-lag+2, NO_CHANGE,
                                 H5C2__NO_FLAGS_SET);
            }

            if ( ( pass2 ) && ( (idx - lag + 1) >= 0 ) &&
                 ( (idx - lag + 1) <= max_indices2[type] ) &&
                 ( ( (idx - lag + 1) % 7 ) == 0 ) ) {

                if ( verbose )
                    HDfprintf(stdout, "15(p, %d, %d) ", type, (idx - lag + 1));

                protect_entry2(cache_ptr, type, (idx - lag + 1));
            }


            if ( do_destroys ) {

                /* unprotect the entry at the trailing edge of the scan
                 * (protected by clause 13 lag iterations ago), deleting
                 * it on two of the four (idx - lag) % 4 cases.
                 */
                if ( ( pass2 ) && ( (idx - lag) >= 0 ) &&
                     ( ( idx - lag) <= max_indices2[type] ) ) {

                    switch ( (idx - lag) %4 ) {

                        case 0: /* we just did an insert */

                            if ( verbose )
                                HDfprintf(stdout,
                                          "16(u, %d, %d) ", type, (idx - lag));

                            unprotect_entry2(cache_ptr, type, idx - lag,
                                             NO_CHANGE, H5C2__NO_FLAGS_SET);
                            break;

                        case 1:
                            if ( (entries2[type])[idx-lag].is_dirty ) {

                                if ( verbose )
                                    HDfprintf(stdout,
                                              "17(u, %d, %d) ", type, (idx - lag));

                                unprotect_entry2(cache_ptr, type, idx - lag,
                                                 NO_CHANGE, H5C2__NO_FLAGS_SET);
                            } else {

                                if ( verbose )
                                    HDfprintf(stdout,
                                              "18(u, %d, %d) ", type, (idx - lag));

                                unprotect_entry2(cache_ptr, type, idx - lag,
                                                 dirty_unprotects,
                                                 H5C2__NO_FLAGS_SET);
                            }
                            break;

                        case 2: /* we just did an insert */

                            if ( verbose )
                                HDfprintf(stdout,
                                          "19(u-del, %d, %d) ", type, (idx - lag));

                            unprotect_entry2(cache_ptr, type, idx - lag,
                                             NO_CHANGE, H5C2__DELETED_FLAG);
                            break;

                        case 3:
                            if ( (entries2[type])[idx-lag].is_dirty ) {

                                if ( verbose )
                                    HDfprintf(stdout,
                                              "20(u-del, %d, %d) ",
                                              type, (idx - lag));

                                unprotect_entry2(cache_ptr, type, idx - lag,
                                                 NO_CHANGE, H5C2__DELETED_FLAG);
                            } else {

                                if ( verbose )
                                    HDfprintf(stdout,
                                              "21(u-del, %d, %d) ",
                                              type, (idx - lag));

                                unprotect_entry2(cache_ptr, type, idx - lag,
                                                 dirty_destroys,
                                                 H5C2__DELETED_FLAG);
                            }
                            break;

                        default:
                            HDassert(0); /* this can't happen... */
                            break;
                    }
                }

            } else {

                /* no destroys -- just unprotect the trailing entry */
                if ( ( pass2 ) && ( (idx - lag) >= 0 ) &&
                     ( ( idx - lag) <= max_indices2[type] ) ) {

                    if ( verbose )
                        HDfprintf(stdout, "22(u, %d, %d) ", type, (idx - lag));

                    unprotect_entry2(cache_ptr, type, idx - lag,
                                     dirty_unprotects, H5C2__NO_FLAGS_SET);
                }
            }

            if ( verbose )
                HDfprintf(stdout, "\n");

            idx++;
        }
        type++;
    }

    if ( ( pass2 ) && ( display_stats ) ) {

        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
    }

    return;

} /* row_major_scan_forward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: hl_row_major_scan_forward2()
+ *
+ * Purpose: Do a high locality sequence of inserts, protects, and
+ * unprotects while scanning through the set of entries.
+ * If pass2 is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/21/04
+ *
+ * Modifications:
+ *
+ * JRM -- 1/21/05
+ * Added the max_index parameter to allow the caller to
+ * throttle the size of the inner loop, and thereby the
+ * execution time of the function.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+hl_row_major_scan_forward2(H5C2_t * cache_ptr,
+ int32_t max_index,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts)
+{
+ const char * fcn_name = "hl_row_major_scan_forward2";
+ int32_t type;
+ int32_t idx;
+ int32_t i;
+ int32_t lag = 100;
+ int32_t local_max_index;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s(): entering.\n", fcn_name);
+
+ HDassert( lag > 5 );
+ HDassert( max_index >= 200 );
+ HDassert( max_index <= MAX_ENTRIES );
+
+ type = 0;
+
+ if ( ( pass2 ) && ( reset_stats ) ) {
+
+ H5C2_stats__reset(cache_ptr);
+ }
+
+ while ( ( pass2 ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
+ {
+ idx = -lag;
+
+ local_max_index = MIN(max_index, max_indices2[type]);
+
+ while ( ( pass2 ) && ( idx <= (local_max_index + lag) ) )
+ {
+ if ( ( pass2 ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
+ ( (idx + lag) <= max_indices2[type] ) &&
+ ( ((idx + lag) % 2) == 0 ) &&
+ ( ! entry_in_cache2(cache_ptr, type, (idx + lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag));
+
+ insert_entry2(cache_ptr, type, (idx + lag), dirty_inserts,
+ H5C2__NO_FLAGS_SET);
+ }
+
+ i = idx;
+
+ while ( ( pass2 ) && ( i >= idx - lag ) && ( i >= 0 ) )
+ {
+ if ( ( pass2 ) && ( i >= 0 ) && ( i <= local_max_index ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, i);
+
+ protect_entry2(cache_ptr, type, i);
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, i);
+
+ unprotect_entry2(cache_ptr, type, i, NO_CHANGE,
+ H5C2__NO_FLAGS_SET);
+ }
+ i--;
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ idx++;
+ }
+ type++;
+ }
+
+ if ( ( pass2 ) && ( display_stats ) ) {
+
+ H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ return;
+
+} /* hl_row_major_scan_forward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: row_major_scan_backward2()
+ *
+ * Purpose: Do a sequence of inserts, protects, unprotects, renames,
+ * destroys while scanning backwards through the set of
+ * entries. If pass2 is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ * JRM -- 4/4/07
+ * Added code supporting multiple read only protects.
+ * Note that this increased the minimum lag to 10.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+row_major_scan_backward2(H5C2_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
+ int dirty_destroys,
+ int dirty_unprotects)
+{
+ const char * fcn_name = "row_major_scan_backward2";
+ int32_t type;
+ int32_t idx;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s(): Entering.\n", fcn_name);
+
+ HDassert( lag >= 10 );
+
+ type = NUMBER_OF_ENTRY_TYPES - 1;
+
+ if ( ( pass2 ) && ( reset_stats ) ) {
+
+ H5C2_stats__reset(cache_ptr);
+ }
+
+ while ( ( pass2 ) && ( type >= 0 ) )
+ {
+ idx = max_indices2[type] + lag;
+
+ while ( ( pass2 ) && ( idx >= -lag ) )
+ {
+ if ( ( pass2 ) && ( do_inserts ) && ( (idx - lag) >= 0 ) &&
+ ( (idx - lag) <= max_indices2[type] ) &&
+ ( ((idx - lag) % 2) == 1 ) &&
+ ( ! entry_in_cache2(cache_ptr, type, (idx - lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag));
+
+ insert_entry2(cache_ptr, type, (idx - lag), dirty_inserts,
+ H5C2__NO_FLAGS_SET);
+ }
+
+
+ if ( ( pass2 ) && ( (idx - lag + 1) >= 0 ) &&
+ ( (idx - lag + 1) <= max_indices2[type] ) &&
+ ( ( (idx - lag + 1) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx - lag + 1));
+
+ protect_entry2(cache_ptr, type, (idx - lag + 1));
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 2) >= 0 ) &&
+ ( (idx - lag + 2) <= max_indices2[type] ) &&
+ ( ( (idx - lag + 2) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag + 2));
+
+ unprotect_entry2(cache_ptr, type, idx-lag+2, NO_CHANGE,
+ H5C2__NO_FLAGS_SET);
+ }
+
+
+ if ( ( pass2 ) && ( do_renames ) && ( (idx - lag + 2) >= 0 ) &&
+ ( (idx - lag + 2) <= max_indices2[type] ) &&
+ ( ( (idx - lag + 2) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(r, %d, %d, %d) ",
+ type, (idx + lag + 2), (int)rename_to_main_addr);
+
+ rename_entry2(cache_ptr, type, (idx - lag + 2),
+ rename_to_main_addr);
+ }
+
+
+ if ( ( pass2 ) && ( (idx - lag + 3) >= 0 ) &&
+ ( (idx - lag + 3) <= max_indices2[type] ) &&
+ ( ( (idx - lag + 3) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx - lag + 3));
+
+ protect_entry2(cache_ptr, type, (idx - lag + 3));
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 5) >= 0 ) &&
+ ( (idx - lag + 5) <= max_indices2[type] ) &&
+ ( ( (idx - lag + 5) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag + 5));
+
+ unprotect_entry2(cache_ptr, type, idx-lag+5, NO_CHANGE,
+ H5C2__NO_FLAGS_SET);
+ }
+
+ if ( do_mult_ro_protects )
+ {
+ if ( ( pass2 ) && ( (idx - lag + 5) >= 0 ) &&
+ ( (idx - lag + 5) < max_indices2[type] ) &&
+ ( (idx - lag + 5) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 5));
+
+ protect_entry_ro2(cache_ptr, type, (idx - lag + 5));
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 6) >= 0 ) &&
+ ( (idx - lag + 6) < max_indices2[type] ) &&
+ ( (idx - lag + 6) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 6));
+
+ protect_entry_ro2(cache_ptr, type, (idx - lag + 6));
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 7) >= 0 ) &&
+ ( (idx - lag + 7) < max_indices2[type] ) &&
+ ( (idx - lag + 7) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 7));
+
+ protect_entry_ro2(cache_ptr, type, (idx - lag + 7));
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 7) >= 0 ) &&
+ ( (idx - lag + 7) < max_indices2[type] ) &&
+ ( (idx - lag + 7) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 7));
+
+ unprotect_entry2(cache_ptr, type, (idx - lag + 7),
+ FALSE, H5C2__NO_FLAGS_SET);
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 8) >= 0 ) &&
+ ( (idx - lag + 8) < max_indices2[type] ) &&
+ ( (idx - lag + 8) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 8));
+
+ unprotect_entry2(cache_ptr, type, (idx - lag + 8),
+ FALSE, H5C2__NO_FLAGS_SET);
+ }
+
+ if ( ( pass2 ) && ( (idx - lag + 9) >= 0 ) &&
+ ( (idx - lag + 9) < max_indices2[type] ) &&
+ ( (idx - lag + 9) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 9));
+
+ unprotect_entry2(cache_ptr, type, (idx - lag + 9),
+ FALSE, H5C2__NO_FLAGS_SET);
+ }
+ } /* if ( do_mult_ro_protects ) */
+
+ if ( ( pass2 ) && ( idx >= 0 ) && ( idx <= max_indices2[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+ protect_entry2(cache_ptr, type, idx);
+ }
+
+
+ if ( ( pass2 ) && ( (idx + lag - 2) >= 0 ) &&
+ ( (idx + lag - 2) <= max_indices2[type] ) &&
+ ( ( (idx + lag - 2) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag - 2));
+
+ unprotect_entry2(cache_ptr, type, idx+lag-2, NO_CHANGE,
+ H5C2__NO_FLAGS_SET);
+ }
+
+ if ( ( pass2 ) && ( (idx + lag - 1) >= 0 ) &&
+ ( (idx + lag - 1) <= max_indices2[type] ) &&
+ ( ( (idx + lag - 1) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx + lag - 1));
+
+ protect_entry2(cache_ptr, type, (idx + lag - 1));
+ }
+
+
+ if ( do_destroys ) {
+
+ if ( ( pass2 ) && ( (idx + lag) >= 0 ) &&
+ ( ( idx + lag) <= max_indices2[type] ) ) {
+
+ switch ( (idx + lag) %4 ) {
+
+ case 0:
+ if ( (entries2[type])[idx+lag].is_dirty ) {
+
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ } else {
+
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ dirty_unprotects,
+ H5C2__NO_FLAGS_SET);
+ }
+ break;
+
+ case 1: /* we just did an insert */
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ NO_CHANGE, H5C2__NO_FLAGS_SET);
+ break;
+
+ case 2:
+ if ( (entries2[type])[idx + lag].is_dirty ) {
+
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ NO_CHANGE, H5C2__DELETED_FLAG);
+ } else {
+
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ dirty_destroys,
+ H5C2__DELETED_FLAG);
+ }
+ break;
+
+ case 3: /* we just did an insrt */
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ NO_CHANGE, H5C2__DELETED_FLAG);
+ break;
+
+ default:
+ HDassert(0); /* this can't happen... */
+ break;
+ }
+ }
+ } else {
+
+ if ( ( pass2 ) && ( (idx + lag) >= 0 ) &&
+ ( ( idx + lag) <= max_indices2[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag));
+
+ unprotect_entry2(cache_ptr, type, idx + lag,
+ dirty_unprotects, H5C2__NO_FLAGS_SET);
+ }
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ idx--;
+ }
+ type--;
+ }
+
+ if ( ( pass2 ) && ( display_stats ) ) {
+
+ H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ return;
+
+} /* row_major_scan_backward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    hl_row_major_scan_backward2()
+ *
+ * Purpose:     Perform a high locality sequence of inserts, protects,
+ *              and unprotects while scanning backwards through the set
+ *              of entries.  Each index visited is protected and then
+ *              immediately unprotected for every position in the lag
+ *              window below it.  No-op if pass2 is false on entry.
+ *
+ *              The max_index parameter allows the caller to throttle
+ *              the size of the inner loop, and thereby the execution
+ *              time of the function.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              10/21/04
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+hl_row_major_scan_backward2(H5C2_t * cache_ptr,
+                            int32_t max_index,
+                            hbool_t verbose,
+                            hbool_t reset_stats,
+                            hbool_t display_stats,
+                            hbool_t display_detailed_stats,
+                            hbool_t do_inserts,
+                            hbool_t dirty_inserts)
+{
+    const char * fcn_name = "hl_row_major_scan_backward2";
+    const int32_t lag = 100;    /* width of the re-visit window */
+    int32_t type;
+    int32_t idx;
+    int32_t i;
+    int32_t local_max_index;
+
+    if ( verbose )
+        HDfprintf(stdout, "%s(): entering.\n", fcn_name);
+
+    /* sanity checks on the scan parameters */
+    HDassert( lag > 5 );
+    HDassert( max_index >= 200 );
+    HDassert( max_index <= MAX_ENTRIES );
+
+    if ( ( pass2 ) && ( reset_stats ) )
+        H5C2_stats__reset(cache_ptr);
+
+    /* scan the entry types from last to first ... */
+    for ( type = NUMBER_OF_ENTRY_TYPES - 1;
+          ( pass2 ) && ( type >= 0 );
+          type-- )
+    {
+        /* clip the index range to the caller supplied maximum */
+        local_max_index = MIN(max_index, max_indices2[type]);
+
+        /* ... and the indices from largest to smallest, overshooting
+         * the valid range by lag at both ends so the windows can drain.
+         */
+        for ( idx = max_indices2[type] + lag;
+              ( pass2 ) && ( idx >= -lag );
+              idx-- )
+        {
+            /* insert every other not-yet-cached entry just ahead of
+             * the working window.
+             */
+            if ( ( pass2 ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
+                 ( (idx + lag) <= local_max_index ) &&
+                 ( ((idx + lag) % 2) == 0 ) &&
+                 ( ! entry_in_cache2(cache_ptr, type, (idx + lag)) ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag));
+
+                insert_entry2(cache_ptr, type, (idx + lag), dirty_inserts,
+                              H5C2__NO_FLAGS_SET);
+            }
+
+            /* protect / unprotect every valid entry in the lag window
+             * at and below idx.
+             */
+            for ( i = idx;
+                  ( pass2 ) && ( i >= idx - lag ) && ( i >= 0 );
+                  i-- )
+            {
+                if ( ( pass2 ) && ( i >= 0 ) && ( i <= local_max_index ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(p, %d, %d) ", type, i);
+
+                    protect_entry2(cache_ptr, type, i);
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(u, %d, %d) ", type, i);
+
+                    unprotect_entry2(cache_ptr, type, i, NO_CHANGE,
+                                     H5C2__NO_FLAGS_SET);
+                }
+            }
+
+            if ( verbose )
+                HDfprintf(stdout, "\n");
+        }
+    }
+
+    if ( ( pass2 ) && ( display_stats ) )
+        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+
+    return;
+
+} /* hl_row_major_scan_backward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    col_major_scan_forward2()
+ *
+ * Purpose:     Perform a sequence of inserts, protects, and unprotects
+ *              while scanning forwards through the set of entries in
+ *              column major order -- i.e. all entry types at a given
+ *              index before moving on to the next index.  No-op if
+ *              pass2 is false on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/23/04
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+col_major_scan_forward2(H5C2_t * cache_ptr,
+                        int32_t lag,
+                        hbool_t verbose,
+                        hbool_t reset_stats,
+                        hbool_t display_stats,
+                        hbool_t display_detailed_stats,
+                        hbool_t do_inserts,
+                        hbool_t dirty_inserts,
+                        int dirty_unprotects)
+{
+    const char * fcn_name = "col_major_scan_forward2()";
+    int32_t type;
+    int32_t idx;
+
+    if ( verbose )
+        HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+    HDassert( lag > 5 );
+
+    if ( ( pass2 ) && ( reset_stats ) )
+        H5C2_stats__reset(cache_ptr);
+
+    /* sweep the index upwards, overshooting the valid range by lag at
+     * both ends so that the insert / protect / unprotect windows can
+     * fill and drain.
+     */
+    for ( idx = -lag;
+          ( pass2 ) && ( (idx - lag) <= MAX_ENTRIES );
+          idx++ )
+    {
+        /* visit every entry type at the current index */
+        for ( type = 0;
+              ( pass2 ) && ( type < NUMBER_OF_ENTRY_TYPES );
+              type++ )
+        {
+            /* insert every third not-yet-cached entry ahead of the scan */
+            if ( ( pass2 ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
+                 ( (idx + lag) <= max_indices2[type] ) &&
+                 ( ((idx + lag) % 3) == 0 ) &&
+                 ( ! entry_in_cache2(cache_ptr, type, (idx + lag)) ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag));
+
+                insert_entry2(cache_ptr, type, (idx + lag), dirty_inserts,
+                              H5C2__NO_FLAGS_SET);
+            }
+
+            /* protect the entry at the current index ... */
+            if ( ( pass2 ) && ( idx >= 0 ) && ( idx <= max_indices2[type] ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+                protect_entry2(cache_ptr, type, idx);
+            }
+
+            /* ... and unprotect the one protected lag iterations ago */
+            if ( ( pass2 ) && ( (idx - lag) >= 0 ) &&
+                 ( (idx - lag) <= max_indices2[type] ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag));
+
+                unprotect_entry2(cache_ptr, type, idx - lag,
+                                 dirty_unprotects, H5C2__NO_FLAGS_SET);
+            }
+
+            if ( verbose )
+                HDfprintf(stdout, "\n");
+        }
+    }
+
+    if ( ( pass2 ) && ( display_stats ) )
+        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+
+    return;
+
+} /* col_major_scan_forward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    hl_col_major_scan_forward2()
+ *
+ * Purpose:     Do a high locality sequence of inserts, protects, and
+ *              unprotects while scanning through the set of entries in
+ *              column major order.  For each index, every entry type at
+ *              every position in the lag window below the index is
+ *              protected and unprotected once.  If pass2 is false on
+ *              entry, do nothing.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              10/25/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/21/05
+ *              Added the max_index parameter to allow the caller to
+ *              throttle the size of the inner loop, and thereby the
+ *              execution time of the function.
+ *
+ *              Review pass: removed a dead store of type at function
+ *              start (the inner loop always resets it before use), and
+ *              corrected the impossible "19/25/04" date above to
+ *              10/25/04, matching the backward variant.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+hl_col_major_scan_forward2(H5C2_t * cache_ptr,
+                           int32_t max_index,
+                           hbool_t verbose,
+                           hbool_t reset_stats,
+                           hbool_t display_stats,
+                           hbool_t display_detailed_stats,
+                           hbool_t do_inserts,
+                           hbool_t dirty_inserts,
+                           int dirty_unprotects)
+{
+    const char * fcn_name = "hl_col_major_scan_forward2()";
+    int32_t type;
+    int32_t idx;
+    int32_t lag = 200;          /* width of the re-visit window */
+    int32_t i;
+    int32_t local_max_index;
+
+    if ( verbose )
+        HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+    /* sanity checks on the scan parameters */
+    HDassert( lag > 5 );
+    HDassert( max_index >= 500 );
+    HDassert( max_index <= MAX_ENTRIES );
+
+    if ( ( pass2 ) && ( reset_stats ) ) {
+
+        H5C2_stats__reset(cache_ptr);
+    }
+
+    idx = 0;
+
+    /* the assertion above already guarantees max_index <= MAX_ENTRIES;
+     * the MIN() is retained as a defensive clip.
+     */
+    local_max_index = MIN(max_index, MAX_ENTRIES);
+
+    while ( ( pass2 ) && ( idx <= local_max_index ) )
+    {
+        /* re-visit the lag window of indices at and below idx ... */
+        i = idx;
+
+        while ( ( pass2 ) && ( i >= 0 ) && ( i >= (idx - lag) ) ) {
+
+            /* ... touching every entry type at each index */
+            type = 0;
+
+            while ( ( pass2 ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
+            {
+                /* insert every third entry the first time its index
+                 * enters the window (i == idx).
+                 */
+                if ( ( pass2 ) && ( do_inserts ) && ( i == idx ) &&
+                     ( i <= local_max_index ) &&
+                     ( (i % 3) == 0 ) &&
+                     ( ! entry_in_cache2(cache_ptr, type, i) ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(i, %d, %d) ", type, i);
+
+                    insert_entry2(cache_ptr, type, i, dirty_inserts,
+                                  H5C2__NO_FLAGS_SET);
+                }
+
+                if ( ( pass2 ) && ( i >= 0 ) && ( i <= local_max_index ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(p, %d, %d) ", type, i);
+
+                    protect_entry2(cache_ptr, type, i);
+                }
+
+                /* NOTE(review): the unprotect bound below is
+                 * max_indices2[type] while the protect bound above is
+                 * local_max_index -- these can differ only when
+                 * max_index == MAX_ENTRIES.  Behavior preserved as
+                 * found; confirm the asymmetry is intentional.
+                 */
+                if ( ( pass2 ) && ( i >= 0 ) &&
+                     ( i <= max_indices2[type] ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(u, %d, %d) ", type, i);
+
+                    unprotect_entry2(cache_ptr, type, i,
+                                     dirty_unprotects, H5C2__NO_FLAGS_SET);
+                }
+
+                if ( verbose )
+                    HDfprintf(stdout, "\n");
+
+                type++;
+            }
+
+            i--;
+        }
+
+        idx++;
+    }
+
+    if ( ( pass2 ) && ( display_stats ) ) {
+
+        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+    }
+
+    return;
+
+} /* hl_col_major_scan_forward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    col_major_scan_backward2()
+ *
+ * Purpose:     Perform a sequence of inserts, protects, and unprotects
+ *              while scanning backwards through the set of entries in
+ *              column major order -- i.e. all entry types at a given
+ *              index before moving on to the next (smaller) index.
+ *              No-op if pass2 is false on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              6/23/04
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+col_major_scan_backward2(H5C2_t * cache_ptr,
+                         int32_t lag,
+                         hbool_t verbose,
+                         hbool_t reset_stats,
+                         hbool_t display_stats,
+                         hbool_t display_detailed_stats,
+                         hbool_t do_inserts,
+                         hbool_t dirty_inserts,
+                         int dirty_unprotects)
+{
+    const char * fcn_name = "col_major_scan_backward2()";
+    int mile_stone = 1;         /* progress marker for verbose output */
+    int32_t type;
+    int32_t idx;
+
+    if ( verbose )
+        HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+    HDassert( lag > 5 );
+
+    if ( ( pass2 ) && ( reset_stats ) )
+        H5C2_stats__reset(cache_ptr);
+
+    idx = MAX_ENTRIES + lag;
+
+    if ( verbose ) /* 1 */
+        HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++);
+
+
+    /* sweep the index downwards, overshooting the valid range by lag
+     * at both ends so the windows can fill and drain.
+     */
+    for ( ; ( pass2 ) && ( (idx + lag) >= 0 ); idx-- )
+    {
+        /* visit the entry types in reverse order at each index */
+        for ( type = NUMBER_OF_ENTRY_TYPES - 1;
+              ( pass2 ) && ( type >= 0 );
+              type-- )
+        {
+            /* insert every third not-yet-cached entry ahead of the scan */
+            if ( ( pass2 ) && ( do_inserts) && ( (idx - lag) >= 0 ) &&
+                 ( (idx - lag) <= max_indices2[type] ) &&
+                 ( ((idx - lag) % 3) == 0 ) &&
+                 ( ! entry_in_cache2(cache_ptr, type, (idx - lag)) ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag));
+
+                insert_entry2(cache_ptr, type, (idx - lag), dirty_inserts,
+                              H5C2__NO_FLAGS_SET);
+            }
+
+            /* protect the entry at the current index ... */
+            if ( ( pass2 ) && ( idx >= 0 ) && ( idx <= max_indices2[type] ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+                protect_entry2(cache_ptr, type, idx);
+            }
+
+            /* ... and unprotect the one protected lag iterations ago */
+            if ( ( pass2 ) && ( (idx + lag) >= 0 ) &&
+                 ( (idx + lag) <= max_indices2[type] ) ) {
+
+                if ( verbose )
+                    HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag));
+
+                unprotect_entry2(cache_ptr, type, idx + lag,
+                                 dirty_unprotects, H5C2__NO_FLAGS_SET);
+            }
+
+            if ( verbose )
+                HDfprintf(stdout, "\n");
+        }
+    }
+
+    if ( verbose ) /* 2 */
+        HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++);
+
+    if ( ( pass2 ) && ( display_stats ) )
+        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+
+    if ( verbose )
+        HDfprintf(stdout, "%s: exiting.\n", fcn_name);
+
+    return;
+
+} /* col_major_scan_backward2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    hl_col_major_scan_backward2()
+ *
+ * Purpose:     Do a high locality sequence of inserts, protects, and
+ *              unprotects while scanning backwards through the set of
+ *              entries in column major order.  For each index, every
+ *              entry type at every position in the lag window above the
+ *              index is protected and unprotected once.  If pass2 is
+ *              false on entry, do nothing.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              10/25/04
+ *
+ * Modifications:
+ *
+ *              JRM -- 1/21/05
+ *              Added the max_index parameter to allow the caller to
+ *              throttle the size of the inner loop, and thereby the
+ *              execution time of the function.
+ *
+ *              Review pass: removed a dead store of type at function
+ *              start -- the inner loop always resets it before use.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+hl_col_major_scan_backward2(H5C2_t * cache_ptr,
+                            int32_t max_index,
+                            hbool_t verbose,
+                            hbool_t reset_stats,
+                            hbool_t display_stats,
+                            hbool_t display_detailed_stats,
+                            hbool_t do_inserts,
+                            hbool_t dirty_inserts,
+                            int dirty_unprotects)
+{
+    const char * fcn_name = "hl_col_major_scan_backward2()";
+    int32_t type;
+    int32_t idx;
+    int32_t lag = 50;           /* width of the re-visit window */
+    int32_t i;
+    int32_t local_max_index;
+
+    if ( verbose )
+        HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+    /* sanity checks on the scan parameters */
+    HDassert( lag > 5 );
+    HDassert( max_index >= 500 );
+    HDassert( max_index <= MAX_ENTRIES );
+
+    /* the assertion above already guarantees max_index <= MAX_ENTRIES;
+     * the MIN() is retained as a defensive clip.
+     */
+    local_max_index = MIN(max_index, MAX_ENTRIES);
+
+    if ( ( pass2 ) && ( reset_stats ) ) {
+
+        H5C2_stats__reset(cache_ptr);
+    }
+
+    idx = local_max_index;
+
+    while ( ( pass2 ) && ( idx >= 0 ) )
+    {
+        /* re-visit the lag window of indices at and above idx ... */
+        i = idx;
+
+        while ( ( pass2 ) && ( i <= local_max_index ) && ( i <= (idx + lag) ) ) {
+
+            /* ... touching every entry type at each index */
+            type = 0;
+
+            while ( ( pass2 ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
+            {
+                /* NOTE(review): unlike hl_col_major_scan_forward2(),
+                 * there is no (i % 3) == 0 filter on inserts here, so
+                 * every uncached entry is inserted when its index first
+                 * enters the window.  Behavior preserved as found;
+                 * confirm the asymmetry is intentional.
+                 */
+                if ( ( pass2 ) && ( do_inserts ) && ( i == idx ) &&
+                     ( i <= local_max_index ) &&
+                     ( ! entry_in_cache2(cache_ptr, type, i) ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(i, %d, %d) ", type, i);
+
+                    insert_entry2(cache_ptr, type, i, dirty_inserts,
+                                  H5C2__NO_FLAGS_SET);
+                }
+
+                if ( ( pass2 ) && ( i >= 0 ) && ( i <= local_max_index ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(p, %d, %d) ", type, i);
+
+                    protect_entry2(cache_ptr, type, i);
+                }
+
+                if ( ( pass2 ) && ( i >= 0 ) &&
+                     ( i <= local_max_index ) ) {
+
+                    if ( verbose )
+                        HDfprintf(stdout, "(u, %d, %d) ", type, i);
+
+                    unprotect_entry2(cache_ptr, type, i,
+                                     dirty_unprotects, H5C2__NO_FLAGS_SET);
+                }
+
+                if ( verbose )
+                    HDfprintf(stdout, "\n");
+
+                type++;
+            }
+
+            i++;
+        }
+
+        idx--;
+    }
+
+    if ( ( pass2 ) && ( display_stats ) ) {
+
+        H5C2_stats(cache_ptr, "test cache", display_detailed_stats);
+    }
+
+    return;
+
+} /* hl_col_major_scan_backward2() */
diff --git a/test/cache2_common.h b/test/cache2_common.h
new file mode 100644
index 0000000..351f7e8
--- /dev/null
+++ b/test/cache2_common.h
@@ -0,0 +1,823 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 10/27/05
+ *
+ * This file contains common #defines, type definitions, and
+ * externs for tests of the cache implemented in H5C2.c
+ */
+#include "h5test.h"
+#include "H5Iprivate.h"
+#include "H5ACprivate.h"
+
+#define H5C2_PACKAGE /*suppress error about including H5Cpkg */
+
+#include "H5C2pkg.h"
+
+#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+
+#include "H5Fpkg.h"
+
+#define NO_CHANGE -1
+
+/* with apologies for the abuse of terminology... */
+
+#define PICO_ENTRY_TYPE 0
+#define NANO_ENTRY_TYPE 1
+#define MICRO_ENTRY_TYPE 2
+#define TINY_ENTRY_TYPE 3
+#define SMALL_ENTRY_TYPE 4
+#define MEDIUM_ENTRY_TYPE 5
+#define LARGE_ENTRY_TYPE 6
+#define HUGE_ENTRY_TYPE 7
+#define MONSTER_ENTRY_TYPE 8
+#define VARIABLE_ENTRY_TYPE 9
+
+#define NUMBER_OF_ENTRY_TYPES 10
+
+#define PICO_ENTRY_SIZE (size_t)1
+#define NANO_ENTRY_SIZE (size_t)4
+#define MICRO_ENTRY_SIZE (size_t)16
+#define TINY_ENTRY_SIZE (size_t)64
+#define SMALL_ENTRY_SIZE (size_t)256
+#define MEDIUM_ENTRY_SIZE (size_t)1024
+#define LARGE_ENTRY_SIZE (size_t)(4 * 1024)
+#define HUGE_ENTRY_SIZE (size_t)(16 * 1024)
+#define MONSTER_ENTRY_SIZE (size_t)(64 * 1024)
+#define VARIABLE_ENTRY_SIZE (size_t)(10 * 1024)
+
+#define NUM_PICO_ENTRIES (10 * 1024)
+#define NUM_NANO_ENTRIES (10 * 1024)
+#define NUM_MICRO_ENTRIES (10 * 1024)
+#define NUM_TINY_ENTRIES (10 * 1024)
+#define NUM_SMALL_ENTRIES (10 * 1024)
+#define NUM_MEDIUM_ENTRIES (10 * 1024)
+#define NUM_LARGE_ENTRIES (10 * 1024)
+#define NUM_HUGE_ENTRIES (10 * 1024)
+#define NUM_MONSTER_ENTRIES (10 * 1024)
+#define NUM_VARIABLE_ENTRIES (10 * 1024)
+
+#define MAX_ENTRIES (10 * 1024)
+
+
+/* The choice of the BASE_ADDR below is arbitrary -- it just has to be
+ * larger than the superblock.
+ */
+#define BASE_ADDR (haddr_t)1024
+#define PICO_BASE_ADDR BASE_ADDR
+#define NANO_BASE_ADDR (haddr_t)(PICO_BASE_ADDR + \
+ (PICO_ENTRY_SIZE * NUM_PICO_ENTRIES))
+#define MICRO_BASE_ADDR (haddr_t)(NANO_BASE_ADDR + \
+ (NANO_ENTRY_SIZE * NUM_NANO_ENTRIES))
+#define TINY_BASE_ADDR (haddr_t)(MICRO_BASE_ADDR + \
+ (MICRO_ENTRY_SIZE * NUM_MICRO_ENTRIES))
+#define SMALL_BASE_ADDR (haddr_t)(TINY_BASE_ADDR + \
+ (TINY_ENTRY_SIZE * NUM_TINY_ENTRIES))
+#define MEDIUM_BASE_ADDR (haddr_t)(SMALL_BASE_ADDR + \
+ (SMALL_ENTRY_SIZE * NUM_SMALL_ENTRIES))
+#define LARGE_BASE_ADDR (haddr_t)(MEDIUM_BASE_ADDR + \
+ (MEDIUM_ENTRY_SIZE * NUM_MEDIUM_ENTRIES))
+#define HUGE_BASE_ADDR (haddr_t)(LARGE_BASE_ADDR + \
+ (LARGE_ENTRY_SIZE * NUM_LARGE_ENTRIES))
+#define MONSTER_BASE_ADDR (haddr_t)(HUGE_BASE_ADDR + \
+ (HUGE_ENTRY_SIZE * NUM_HUGE_ENTRIES))
+#define VARIABLE_BASE_ADDR (haddr_t)(MONSTER_BASE_ADDR + \
+ (MONSTER_ENTRY_SIZE * NUM_MONSTER_ENTRIES))
+
+#define PICO_ALT_BASE_ADDR (haddr_t)(VARIABLE_BASE_ADDR + \
+ (VARIABLE_ENTRY_SIZE * NUM_VARIABLE_ENTRIES))
+#define NANO_ALT_BASE_ADDR (haddr_t)(PICO_ALT_BASE_ADDR + \
+ (PICO_ENTRY_SIZE * NUM_PICO_ENTRIES))
+#define MICRO_ALT_BASE_ADDR (haddr_t)(NANO_ALT_BASE_ADDR + \
+ (NANO_ENTRY_SIZE * NUM_NANO_ENTRIES))
+#define TINY_ALT_BASE_ADDR (haddr_t)(MICRO_ALT_BASE_ADDR + \
+ (MICRO_ENTRY_SIZE * NUM_MICRO_ENTRIES))
+#define SMALL_ALT_BASE_ADDR (haddr_t)(TINY_ALT_BASE_ADDR + \
+ (TINY_ENTRY_SIZE * NUM_TINY_ENTRIES))
+#define MEDIUM_ALT_BASE_ADDR (haddr_t)(SMALL_ALT_BASE_ADDR + \
+ (SMALL_ENTRY_SIZE * NUM_SMALL_ENTRIES))
+#define LARGE_ALT_BASE_ADDR (haddr_t)(MEDIUM_ALT_BASE_ADDR + \
+ (MEDIUM_ENTRY_SIZE * NUM_MEDIUM_ENTRIES))
+#define HUGE_ALT_BASE_ADDR (haddr_t)(LARGE_ALT_BASE_ADDR + \
+ (LARGE_ENTRY_SIZE * NUM_LARGE_ENTRIES))
+#define MONSTER_ALT_BASE_ADDR (haddr_t)(HUGE_ALT_BASE_ADDR + \
+ (HUGE_ENTRY_SIZE * NUM_HUGE_ENTRIES))
+#define VARIABLE_ALT_BASE_ADDR (haddr_t)(MONSTER_ALT_BASE_ADDR + \
+ (MONSTER_ENTRY_SIZE * NUM_MONSTER_ENTRIES))
+#define MAX_ADDR (haddr_t)(VARIABLE_ALT_BASE_ADDR + \
+ (VARIABLE_ENTRY_SIZE * NUM_VARIABLE_ENTRIES))
+#define ADDR_SPACE_SIZE (haddr_t)(MAX_ADDR - BASE_ADDR)
+
+#define MAX_PINS 8 /* Maximum number of entries that can be
+ * directly pinned by a single entry.
+ */
+
+#define FLUSH_OP__NO_OP 0
+#define FLUSH_OP__DIRTY 1
+#define FLUSH_OP__RESIZE 2
+#define FLUSH_OP__RENAME 3
+#define FLUSH_OP__MAX_OP 3
+
+#define MAX_FLUSH_OPS 10 /* Maximum number of flush operations
+ * that can be associated with a
+ * cache entry.
+ */
+
+/* flush_op: description of a single operation to be triggered when a
+ * test cache entry is flushed (serialized).  Each test entry carries
+ * up to MAX_FLUSH_OPS of these in its flush_ops array.
+ */
+typedef struct flush_op
+{
+    int			op_code;	/* integer op code indicating the
+					 * operation to be performed.  At
+					 * present it must be one of:
+					 *
+					 *   FLUSH_OP__NO_OP
+					 *   FLUSH_OP__DIRTY
+					 *   FLUSH_OP__RESIZE
+					 *   FLUSH_OP__RENAME
+					 */
+    int			type;		/* type code of the cache entry that
+					 * is the target of the operation.
+					 * This value is passed into the
+					 * function implementing the flush
+					 * operation.
+					 */
+    int			idx;		/* index of the cache entry that
+					 * is the target of the operation.
+					 * This value is passed into the
+					 * function implementing the flush
+					 * operation.
+					 */
+    hbool_t		flag;		/* boolean flag passed into the
+					 * function implementing the flush
+					 * operation.  The meaning of the
+					 * flag is dependent upon the flush
+					 * operation:
+					 *
+					 * FLUSH_OP__DIRTY: TRUE iff the
+					 *   target is pinned, and is to
+					 *   be dirtied via the
+					 *   H5C_mark_pinned_entry_dirty()
+					 *   call.
+					 *
+					 * FLUSH_OP__RESIZE: TRUE iff the
+					 *   target is pinned, and is to
+					 *   be resized via the
+					 *   H5C_mark_pinned_entry_dirty()
+					 *   call.
+					 *
+					 * FLUSH_OP__RENAME: TRUE iff the
+					 *   target is to be renamed to
+					 *   its main address.
+					 */
+    size_t		size;		/* New target size in the
+					 * FLUSH_OP__RESIZE operation.
+					 * (The original comment said
+					 * FLUSH_OP__RENAME, but RESIZE is
+					 * the only op that changes size.)
+					 * Unused elsewhere.
+					 */
+} flush_op;
+
+/* test_entry_t: a single test cache entry.  The header member must be
+ * first -- the cache manipulates entries through pointers to their
+ * embedded H5C2_cache_entry_t.
+ */
+typedef struct test_entry_t
+{
+    H5C2_cache_entry_t	  header;	/* entry data used by the cache
+					 * -- must be first
+					 */
+    struct test_entry_t * self; 	/* pointer to this entry -- used for
+					 * sanity checking.
+					 */
+    H5C2_t              * cache_ptr;	/* pointer to the cache in which
+					 * the entry resides, or NULL if the
+					 * entry is not in cache.
+					 */
+    hbool_t		  written_to_main_addr;
+					/* Flag indicating whether an image
+					 * of the entry has been written to
+					 * its main address.  Since we no
+					 * longer have a flush callback, we
+					 * set this field to true whenever the
+					 * entry is serialized while at its
+					 * main address.
+					 */
+    hbool_t		  written_to_alt_addr;
+					/* Flag indicating whether an image
+					 * of the entry has been written to
+					 * its alternate address.  Since we no
+					 * longer have a flush callback, we
+					 * set this field to true whenever the
+					 * entry is serialized while at its
+					 * alternate address.
+					 */
+    haddr_t		  addr; 	/* where the cache thinks this entry
+					 * is located
+					 */
+    hbool_t		  at_main_addr;	/* boolean flag indicating whether
+					 * the entry is supposed to be at
+					 * either its main or alternate
+					 * address.
+					 */
+    haddr_t		  main_addr;	/* initial location of the entry
+					 */
+    haddr_t		  alt_addr;	/* location to which the entry
+					 * can be relocated or "renamed"
+					 */
+    size_t		  size; 	/* how big the cache thinks this
+					 * entry is
+					 */
+    int32_t		  type; 	/* indicates which entry array this
+					 * entry is in
+					 */
+    int32_t		  index;	/* index in its entry array
+					 */
+    int32_t		  serializes;	/* number of times this entry has
+					 * been serialized.
+					 */
+    int32_t		  deserializes;	/* number of times this entry has
+					 * been deserialized
+					 */
+    hbool_t		  is_dirty;	/* entry has been modified since
+					 * last write
+					 */
+    hbool_t		  is_protected;	/* entry should currently be on
+					 * the cache's protected list.
+					 */
+    hbool_t		  is_read_only;	/* TRUE iff the entry should be
+					 * protected read only.
+					 */
+    int			  ro_ref_count;	/* Number of outstanding read only
+					 * protects on the entry.
+					 */
+    hbool_t		  is_pinned;	/* entry is currently pinned in
+					 * the cache.
+					 */
+    int			  pinning_ref_count; /* Number of entries that
+					 * pin this entry in the cache.
+					 * When this count drops to zero,
+					 * this entry should be unpinned.
+					 */
+    int			  num_pins;	/* Number of entries that this
+					 * entry pins in the cache.  This
+					 * value must be in the range
+					 * [0, MAX_PINS].
+					 */
+    int			  pin_type[MAX_PINS]; /* array of the types of entries
+					 * pinned by this entry.
+					 */
+    int			  pin_idx[MAX_PINS]; /* array of the indices of
+					 * entries pinned by this entry.
+					 */
+    int			  num_flush_ops; /* integer field containing the
+					 * number of flush operations to
+					 * be executed when the entry is
+					 * flushed.  This value must lie in
+					 * the closed interval
+					 * [0, MAX_FLUSH_OPS].
+					 */
+    struct flush_op	  flush_ops[MAX_FLUSH_OPS]; /* Array of instances
+					 * of struct flush_op detailing the
+					 * flush operations (if any) that
+					 * are to be executed when the entry
+					 * is flushed from the cache.
+					 *
+					 * num_flush_ops contains the number
+					 * of valid entries in this array.
+					 */
+    hbool_t		  flush_op_self_resize_in_progress; /* Boolean flag
+					 * that is set to TRUE iff this
+					 * entry is being flushed, it has
+					 * been resized by a resize flush
+					 * op, and the flush function has
+					 * not yet returned.  This field is
+					 * used to turn off overactive sanity
+					 * checking code that would otherwise
+					 * cause a false test failure.
+					 */
+    hbool_t		  deserialized;	/* entry has been deserialized since
+					 * the last time it was reset.
+					 */
+    hbool_t		  cleared;	/* entry has been cleared since the
+					 * last time it was reset.
+					 */
+    hbool_t		  serialized;	/* entry has been serialized since the
+					 * last time it was reset.
+					 */
+    hbool_t		  destroyed;	/* entry has been destroyed since the
+					 * last time it was reset.
+					 */
+} test_entry_t;
+
+/* The following is a cut down copy of the hash table manipulation
+ * macros from H5C.c, which have been further modified to avoid references
+ * to the error reporting macros.  Needless to say, these macros must be
+ * updated as necessary.
+ */
+
+/* Hash function: bucket is selected by bits 3 and up of the file address
+ * (the low three bits are discarded -- presumably because entry addresses
+ * are at least 8 byte aligned; TODO confirm against H5C.c).
+ */
+#define H5C2__HASH_MASK        ((size_t)(H5C2__HASH_TABLE_LEN - 1) << 3)
+#define H5C2__HASH_FCN(x)      (int)(((x) & H5C2__HASH_MASK) >> 3)
+
+/* Sanity checks run before a hash table search.  Failures are reported
+ * to stdout instead of through the HDF5 error macros.
+ */
+#define H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr)          \
+if ( ( (cache_ptr) == NULL ) ||                        \
+     ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) ||     \
+     ( ! H5F_addr_defined(Addr) ) ||                   \
+     ( H5C2__HASH_FCN(Addr) < 0 ) ||                    \
+     ( H5C2__HASH_FCN(Addr) >= H5C2__HASH_TABLE_LEN ) ) { \
+    HDfprintf(stdout, "Pre HT search SC failed.\n");   \
+}
+
+/* Sanity checks run after a successful hash table search: the found
+ * entry must have the searched-for address and consistent bucket links.
+ */
+#define H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k) \
+if ( ( (cache_ptr) == NULL ) ||                                  \
+     ( (cache_ptr)->magic != H5C2__H5C2_T_MAGIC ) ||               \
+     ( (cache_ptr)->index_len < 1 ) ||                           \
+     ( (entry_ptr) == NULL ) ||                                  \
+     ( (cache_ptr)->index_size < (entry_ptr)->size ) ||          \
+     ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) ||               \
+     ( (entry_ptr)->size <= 0 ) ||                               \
+     ( ((cache_ptr)->index)[k] == NULL ) ||                      \
+     ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) &&             \
+       ( (entry_ptr)->ht_prev == NULL ) ) ||                     \
+     ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) &&             \
+       ( (entry_ptr)->ht_prev != NULL ) ) ||                     \
+     ( ( (entry_ptr)->ht_prev != NULL ) &&                       \
+       ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) ||     \
+     ( ( (entry_ptr)->ht_next != NULL ) &&                       \
+       ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) {    \
+    HDfprintf(stdout, "Post successful HT search SC failed.\n"); \
+}
+
+
+/* Search the hash table for the entry at Addr, leaving the result (or
+ * NULL) in entry_ptr.  On a hit, the entry is sanity checked and moved
+ * to the head of its bucket's chain.
+ */
+#define H5C2__SEARCH_INDEX(cache_ptr, Addr, entry_ptr)                  \
+{                                                                      \
+    int k;                                                             \
+    int depth = 0;                                                     \
+    H5C2__PRE_HT_SEARCH_SC(cache_ptr, Addr)                             \
+    k = H5C2__HASH_FCN(Addr);                                           \
+    entry_ptr = ((cache_ptr)->index)[k];                               \
+    while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
+    {                                                                  \
+        (entry_ptr) = (entry_ptr)->ht_next;                            \
+        (depth)++;                                                     \
+    }                                                                  \
+    if ( entry_ptr )                                                   \
+    {                                                                  \
+        H5C2__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k)      \
+        if ( entry_ptr != ((cache_ptr)->index)[k] )                    \
+        {                                                              \
+            if ( (entry_ptr)->ht_next )                                \
+            {                                                          \
+                (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev;  \
+            }                                                          \
+            HDassert( (entry_ptr)->ht_prev != NULL );                  \
+            (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next;      \
+            ((cache_ptr)->index)[k]->ht_prev = (entry_ptr);            \
+            (entry_ptr)->ht_next = ((cache_ptr)->index)[k];            \
+            (entry_ptr)->ht_prev = NULL;                               \
+            ((cache_ptr)->index)[k] = (entry_ptr);                     \
+        }                                                              \
+    }                                                                  \
+}
+
+
+/* misc type definitions */
+
+/* Specification of one entry in a flush_cache() test, together with the
+ * expected state of its status flags after the flush.
+ */
+struct flush_cache_test_spec
+{
+    int			entry_num;
+    int			entry_type;
+    int			entry_index;
+    hbool_t		insert_flag;
+    hbool_t		dirty_flag;
+    unsigned int	flags;
+    hbool_t		expected_deserialized;
+    hbool_t		expected_cleared;
+    hbool_t		expected_serialized;
+    hbool_t		expected_destroyed;
+};
+
+/* As flush_cache_test_spec, extended with pinning information for
+ * pinned-entry flush tests.
+ */
+struct pe_flush_cache_test_spec
+{
+    int			entry_num;
+    int			entry_type;
+    int			entry_index;
+    hbool_t		insert_flag;
+    hbool_t		dirty_flag;
+    unsigned int	flags;
+    int			num_pins;
+    int			pin_type[MAX_PINS];
+    int			pin_idx[MAX_PINS];
+    hbool_t		expected_deserialized;
+    hbool_t		expected_cleared;
+    hbool_t		expected_serialized;
+    hbool_t		expected_destroyed;
+};
+
+/* Expected state of a single entry to be checked after a flush-op test
+ * has run.
+ */
+struct fo_flush_entry_check
+{
+    int			entry_num;
+    int			entry_type;
+    int			entry_index;
+    size_t		expected_size;
+    hbool_t		in_cache;
+    hbool_t		at_main_addr;
+    hbool_t		is_dirty;
+    hbool_t		is_protected;
+    hbool_t		is_pinned;
+    hbool_t		expected_deserialized;
+    hbool_t		expected_cleared;
+    hbool_t		expected_serialized;
+    hbool_t		expected_destroyed;
+};
+
+/* Specification of one entry in a flush-op test: setup (pins and flush
+ * operations to attach) plus expected post-flush status flags.
+ */
+struct fo_flush_cache_test_spec
+{
+    int				entry_num;
+    int				entry_type;
+    int				entry_index;
+    hbool_t			insert_flag;
+    unsigned int		flags;
+    size_t			new_size;
+    int				num_pins;
+    int				pin_type[MAX_PINS];
+    int				pin_idx[MAX_PINS];
+    int				num_flush_ops;
+    struct flush_op		flush_ops[MAX_FLUSH_OPS];
+    hbool_t			expected_deserialized;
+    hbool_t			expected_cleared;
+    hbool_t			expected_serialized;
+    hbool_t			expected_destroyed;
+};
+
+/* Specification of one entry for a rename (relocation) test. */
+struct rename_entry_test_spec
+{
+    int			entry_type;
+    int			entry_index;
+    hbool_t		is_dirty;
+    hbool_t		is_pinned;
+};
+
+/* Expected status of a single entry, used by bulk entry-status
+ * verification after a test sequence.
+ */
+struct expected_entry_status
+{
+    int			entry_type;
+    int			entry_index;
+    size_t		size;
+    hbool_t		in_cache;
+    hbool_t		at_main_addr;
+    hbool_t		is_dirty;
+    hbool_t		is_protected;
+    hbool_t		is_pinned;
+    hbool_t		deserialized;
+    hbool_t		cleared;
+    hbool_t		serialized;
+    hbool_t		destroyed;
+};
+
+
+
+
+/* global variable externs: */
+
+extern hbool_t write_permitted2;
+extern hbool_t pass2; /* set to false on error */
+extern hbool_t skip_long_tests2;
+extern hbool_t run_full_test2;
+extern const char *failure_mssg2;
+
+extern test_entry_t pico_entries2[NUM_PICO_ENTRIES];
+extern test_entry_t nano_entries2[NUM_NANO_ENTRIES];
+extern test_entry_t micro_entries2[NUM_MICRO_ENTRIES];
+extern test_entry_t tiny_entries2[NUM_TINY_ENTRIES];
+extern test_entry_t small_entries2[NUM_SMALL_ENTRIES];
+extern test_entry_t medium_entries2[NUM_MEDIUM_ENTRIES];
+extern test_entry_t large_entries2[NUM_LARGE_ENTRIES];
+extern test_entry_t huge_entries2[NUM_HUGE_ENTRIES];
+extern test_entry_t monster_entries2[NUM_MONSTER_ENTRIES];
+
+extern test_entry_t * entries2[NUMBER_OF_ENTRY_TYPES];
+extern const int32_t max_indices2[NUMBER_OF_ENTRY_TYPES];
+extern const size_t entry_sizes2[NUMBER_OF_ENTRY_TYPES];
+extern const haddr_t base_addrs2[NUMBER_OF_ENTRY_TYPES];
+extern const haddr_t alt_base_addrs2[NUMBER_OF_ENTRY_TYPES];
+extern const char * entry_type_names2[NUMBER_OF_ENTRY_TYPES];
+
+
+/* call back function declarations: */
+
+herr_t check_write_permitted2(const H5F_t UNUSED * f,
+ hid_t UNUSED dxpl_id,
+ hbool_t * write_permitted_ptr);
+
+herr_t pico_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t nano_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t micro_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t tiny_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t small_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t medium_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t large_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t huge_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t monster_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+herr_t variable_clear_dirty_bits(haddr_t addr, size_t len, void * thing);
+
+
+void * pico_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * nano_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * micro_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * tiny_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * small_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * medium_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * large_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * huge_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * monster_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+void * variable_deserialize(haddr_t addr, size_t len, const void * image_ptr,
+ const void * udata_ptr, hbool_t * dirty_ptr);
+
+herr_t pico_image_len(void *thing, size_t *image_len_ptr);
+herr_t nano_image_len(void *thing, size_t *image_len_ptr);
+herr_t micro_image_len(void *thing, size_t *image_len_ptr);
+herr_t tiny_image_len(void *thing, size_t *image_len_ptr);
+herr_t small_image_len(void *thing, size_t *image_len_ptr);
+herr_t medium_image_len(void *thing, size_t *image_len_ptr);
+herr_t large_image_len(void *thing, size_t *image_len_ptr);
+herr_t huge_image_len(void *thing, size_t *image_len_ptr);
+herr_t monster_image_len(void *thing, size_t *image_len_ptr);
+herr_t variable_image_len(void *thing, size_t *image_len_ptr);
+
+herr_t pico_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t nano_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t micro_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t tiny_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t small_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t medium_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t large_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t huge_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t monster_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+herr_t variable_serialize(haddr_t addr, size_t len, void * image_ptr,
+ void * thing, unsigned * flags_ptr,
+ haddr_t * new_addr_ptr, size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+
+herr_t pico_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t nano_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t micro_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t tiny_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t small_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t medium_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t large_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t huge_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t monster_free_icr(haddr_t addr, size_t len, void * thing);
+herr_t variable_free_icr(haddr_t addr, size_t len, void * thing);
+
+
+/* callback table extern */
+
+extern const H5C2_class_t types2[NUMBER_OF_ENTRY_TYPES];
+
+
+/* function declarations: */
+
+void add_flush_op2(int target_type,
+ int target_idx,
+ int op_code,
+ int type,
+ int idx,
+ hbool_t flag,
+ size_t size);
+
+
+void addr_to_type_and_index2(haddr_t addr,
+ int32_t * type_ptr,
+ int32_t * index_ptr);
+
+#if 0 /* keep this for a while -- it may be useful */
+haddr_t type_and_index_to_addr2(int32_t type,
+ int32_t idx);
+#endif
+
+void dirty_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t dirty_pin);
+
+void expunge_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+void insert_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t dirty,
+ unsigned int flags);
+
+void mark_pinned_entry_dirty2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t size_changed,
+ size_t new_size);
+
+void mark_pinned_or_protected_entry_dirty2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+void rename_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t main_addr);
+
+void protect_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+void protect_entry_ro2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+hbool_t entry_in_cache2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+void create_pinned_entry_dependency2(H5C2_t * cache_ptr,
+ int pinning_type,
+ int pinning_idx,
+ int pinned_type,
+ int pinned_idx);
+
+void execute_flush_op2(H5C2_t * cache_ptr,
+ struct test_entry_t * entry_ptr,
+ struct flush_op * op_ptr,
+ unsigned * flags_ptr);
+
+void reset_entries2(void);
+
+void resize_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ size_t new_size,
+ hbool_t resize_pin);
+
+H5C2_t * setup_cache2(size_t max_cache_size, size_t min_clean_size);
+
+void row_major_scan_forward2(H5C2_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
+ int dirty_destroys,
+ int dirty_unprotects);
+
+void hl_row_major_scan_forward2(H5C2_t * cache_ptr,
+ int32_t max_index,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts);
+
+void row_major_scan_backward2(H5C2_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
+ int dirty_destroys,
+ int dirty_unprotects);
+
+void hl_row_major_scan_backward2(H5C2_t * cache_ptr,
+ int32_t max_index,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts);
+
+void col_major_scan_forward2(H5C2_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+void hl_col_major_scan_forward2(H5C2_t * cache_ptr,
+ int32_t max_index,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+void col_major_scan_backward2(H5C2_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+void hl_col_major_scan_backward2(H5C2_t * cache_ptr,
+ int32_t max_index,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+void takedown_cache2(H5C2_t * cache_ptr,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats);
+
+void flush_cache2(H5C2_t * cache_ptr,
+ hbool_t destroy_entries,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats);
+
+void unpin_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+void unprotect_entry2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ int dirty,
+ unsigned int flags);
+
+void unprotect_entry_with_size_change2(H5C2_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ unsigned int flags,
+ size_t new_size);
+
+void verify_clean2(void);
+
+void verify_entry_status2(H5C2_t * cache_ptr,
+ int tag,
+ int num_entries,
+ struct expected_entry_status expected[]);
+
+void verify_unprotected2(void);
+
diff --git a/testpar/t_cache2.c b/testpar/t_cache2.c
new file mode 100644
index 0000000..da527b1
--- /dev/null
+++ b/testpar/t_cache2.c
@@ -0,0 +1,6108 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel metadata cache tests.
+ *
+ */
+
+#include "h5test.h"
+#include "testpar.h"
+#include "H5Iprivate.h"
+#include "H5MFprivate.h"
+#include "H5AC2private.h"
+
+#define H5C2_PACKAGE /*suppress error about including H5C2pkg */
+
+#include "H5C2pkg.h"
+
+#define H5AC2_PACKAGE /*suppress error about including H5AC2pkg */
+
+#include "H5AC2pkg.h"
+
+#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+
+#include "H5Fpkg.h"
+
+#define BASE_ADDR (haddr_t)1024
+
+
+/* cumulative error / failure counts for this test program */
+int nerrors = 0;
+int failures = 0;
+hbool_t verbose = TRUE; /* used to control error messages */
+#if 1
+/* So far we haven't needed this, but that may change.
+ * Keep it around for now
+ */
+hid_t noblock_dxpl_id=(-1);
+#endif
+
+#define NFILENAME 2
+#define PARATESTFILE filenames[0]
+const char *FILENAME[NFILENAME]={"Cache2TestDummy", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+haddr_t max_addr = 0; /* used to store the end of
+                       * the address space used by
+                       * the data array (see below).
+                       */
+hbool_t callbacks_verbose = FALSE; /* flag used to control whether
+                                    * the callback functions are in
+                                    * verbose mode.
+                                    */
+
+
+/* MPI bookkeeping: ranks, sizes, and communicators for the world
+ * communicator and for the file communicator (the world communicator
+ * minus the server process -- see set_up_file_communicator()).
+ */
+int world_mpi_size = -1;
+int world_mpi_rank = -1;
+int world_server_mpi_rank = -1;
+MPI_Comm world_mpi_comm = MPI_COMM_NULL;
+int file_mpi_size = -1;
+int file_mpi_rank = -1;
+MPI_Comm file_mpi_comm = MPI_COMM_NULL;
+
+
+/* the following globals are used to maintain rudimentary statistics
+ * to check the validity of the statistics maintained by H5C2.c
+ */
+
+long datum_clears = 0;
+long datum_pinned_clears = 0;
+long datum_destroys = 0;
+long datum_flushes = 0;
+long datum_pinned_flushes = 0;
+long datum_loads = 0;
+long global_pins = 0;
+long global_dirty_pins = 0;
+long local_pins = 0;
+
+/*****************************************************************************
+ * struct datum
+ *
+ * Instances of struct datum are used to store information on entries
+ * that may be loaded into the cache. The individual fields are
+ * discussed below:
+ *
+ * header: Instance of H5C2_cache_entry_t used by the cache for its data.
+ * This field is only used on the file processes, not on the
+ * server process.
+ *
+ * This field MUST be the first entry in this structure.
+ *
+ * base_addr: Base address of the entry.
+ *
+ * len: Length of the entry.
+ *
+ * local_len: Length of the entry according to the cache. This
+ * value must be positive, and may not be larger than len.
+ *
+ * The field exists to allow us change the sizes of entries
+ * in the cache without upsetting the server. This value
+ * is only used locally, and is never sent to the server.
+ *
+ * ver: Version number of the entry. This number is initialized
+ * to zero, and incremented each time the entry is modified.
+ *
+ * dirty: Boolean flag indicating whether the entry is dirty.
+ *
+ * For current purposes, an entry is clean until it is
+ * modified, and dirty until written to the server (cache
+ * on process 0) or until it is marked clean (all other
+ * caches).
+ *
+ * valid: Boolean flag indicating whether the entry contains
+ * valid data. Attempts to read an entry whose valid
+ * flag is not set should trigger an error.
+ *
+ * locked: Boolean flag that is set to true iff the entry is in
+ * the cache and locked.
+ *
+ * global_pinned: Boolean flag that is set to true iff the entry has
+ * been pinned collectively in all caches. Since writes must
+ * be collective across all processes, only entries pinned
+ * in this fashion may be marked dirty.
+ *
+ * local_pinned: Boolean flag that is set to true iff the entry
+ * has been pinned in the local cache, but probably not all
+ * caches. Such pins will typically not be consistent across
+ * processes, and thus cannot be marked as dirty unless they
+ * happen to overlap some collective operation.
+ *
+ * index: Index of this instance of datum in the data_index[] array
+ * discussed below.
+ *
+ *****************************************************************************/
+
+/* See the block comment above for a full discussion of each field. */
+struct datum
+{
+    H5C2_cache_entry_t header;        /* cache entry header -- MUST be first */
+    haddr_t            base_addr;     /* base address of the entry */
+    size_t             len;           /* entry length per the server */
+    size_t             local_len;     /* entry length per the local cache */
+    int                ver;           /* version -- bumped on each modification */
+    hbool_t            dirty;         /* entry has unwritten modifications */
+    hbool_t            valid;         /* entry contains valid data */
+    hbool_t            locked;        /* entry is in the cache and locked */
+    hbool_t            global_pinned; /* pinned collectively in all caches */
+    hbool_t            local_pinned;  /* pinned in the local cache only */
+    int                index;         /* index of this datum in data[] */
+};
+
+/*****************************************************************************
+ * data array
+ *
+ * The data array is an array of instances of datum of size
+ * NUM_DATA_ENTRIES that is used to track the particulars of all
+ * the entries that may be loaded into the cache.
+ *
+ * It exists on all processes, although the master copy is maintained
+ * by the server process. If the cache is performing correctly, all
+ * versions should be effectively identical. By that I mean that
+ * the data received from the server should always match that in
+ * the local version of the data array.
+ *
+ *****************************************************************************/
+
+#define NUM_DATA_ENTRIES 100000
+
+/* master array of all entries that may be loaded into the cache -- the
+ * server process holds the authoritative copy (see comment above).
+ */
+struct datum data[NUM_DATA_ENTRIES];
+
+
+/* Many tests use the size of data array as the size of test loops.
+ * On some machines, this results in unacceptably long test runs.
+ *
+ * To deal with this issue, I have introduced the virt_num_data_entries
+ * global, which can be set to a lower value to throttle the length of
+ * tests.
+ *
+ * Note that this value must always be divisible by 40, and must be an
+ * even divisor of NUM_DATA_ENTRIES. So far, all tests have been with
+ * powers of 10 that meet these criteria.
+ *
+ * Further, this value must be consistent across all processes.
+ */
+
+#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
+#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
+/* Use a smaller test size to avoid creating huge MPE logfiles. */
+/* NOTE(review): "ENTIES" looks like a typo for "ENTRIES" -- confirm
+ * before renaming, as other files may reference this macro.
+ */
+#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+
+/* number of entries actually exercised by the tests -- must be divisible
+ * by 40, divide NUM_DATA_ENTRIES evenly, and be identical on all
+ * processes (see the comment above).
+ */
+int virt_num_data_entries = NUM_DATA_ENTRIES;
+
+
+/*****************************************************************************
+ * data_index array
+ *
+ * The data_index array is an array of integer used to maintain a list
+ * of instances of datum in the data array in increasing base_addr order.
+ *
+ * This array is necessary, as rename operations can swap the values
+ * of the base_addr fields of two instances of datum. Without this
+ * array, we would no longer be able to use a binary search on a sorted
+ * list to find the indexes of instances of datum given the values of
+ * their base_addr fields.
+ *
+ *****************************************************************************/
+
+int data_index[NUM_DATA_ENTRIES]; /* indices into data[], kept sorted by base_addr */
+
+
+/*****************************************************************************
+ * struct mssg
+ *
+ * The mssg structure is used as a generic container for messages to
+ * and from the server. Not all fields are used in all cases.
+ *
+ * req: Integer field containing the type of the message.
+ *
+ * src: World communicator MPI rank of the sending process.
+ *
+ * dest: World communicator MPI rank of the destination process.
+ *
+ * mssg_num: Serial number assigned to the message by the sender.
+ *
+ * base_addr: Base address of a datum. Not used in all mssgs.
+ *
+ * len: Length of a datum (in bytes). Not used in all mssgs.
+ *
+ * ver: Version number of a datum. Not used in all mssgs.
+ *
+ * magic: Magic number for error detection. Must be set to
+ * MSSG_MAGIC.
+ *
+ *****************************************************************************/
+
+#define DO_WRITE_REQ_ACK FALSE /* server acks each write request? */
+#define DO_SYNC_AFTER_WRITE TRUE /* sync with server after each write? */
+
+/* message request codes -- stored in the req field of struct mssg_t */
+#define WRITE_REQ_CODE 0
+#define WRITE_REQ_ACK_CODE 1
+#define READ_REQ_CODE 2
+#define READ_REQ_REPLY_CODE 3
+#define SYNC_REQ_CODE 4
+#define SYNC_ACK_CODE 5
+#define DONE_REQ_CODE 6
+#define MAX_REQ_CODE 6
+
+#define MSSG_MAGIC 0x1248 /* sanity-check value for the magic field */
+
+/* see the block comment above for the semantics of each field */
+struct mssg_t
+{
+    int req;
+    int src;
+    int dest;
+    long int mssg_num;
+    haddr_t base_addr;
+    int len;
+    int ver;
+    unsigned magic;
+};
+
+MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
+
+
+/*****************************************************************************/
+/************************** function declarations ****************************/
+/*****************************************************************************/
+
+/* stats functions */
+
+void print_stats(void);
+void reset_stats(void);
+
+/* MPI setup functions */
+
+hbool_t set_up_file_communicator(void);
+
+
+/* data array manipulation functions */
+
+int addr_to_datum_index(haddr_t base_addr);
+void init_data(void);
+
+
+/* test coordination related functions */
+
+int do_express_test(void);
+void do_sync(void);
+int get_max_nerrors(void);
+
+
+/* mssg xfer related functions */
+
+hbool_t recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset);
+hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag);
+hbool_t setup_derived_types(void);
+hbool_t takedown_derived_types(void);
+
+
+/* server functions */
+
+hbool_t server_main(void);
+hbool_t serve_read_request(struct mssg_t * mssg_ptr);
+hbool_t serve_sync_request(struct mssg_t * mssg_ptr);
+hbool_t serve_write_request(struct mssg_t * mssg_ptr);
+
+
+/* call back functions & related data structures */
+
+static herr_t datum_clear_dirty_bits(haddr_t addr,
+ size_t len,
+ void * thing);
+
+static void * datum_deserialize(haddr_t addr,
+ size_t len,
+ const void * image_ptr,
+ const void * udata_ptr,
+ hbool_t * dirty_ptr);
+
+static herr_t datum_image_len(void *thing,
+ size_t *image_len_ptr);
+
+static herr_t datum_serialize(haddr_t addr,
+ size_t len,
+ void * image_ptr,
+ void * thing,
+ unsigned * flags_ptr,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ void ** new_image_ptr_ptr);
+
+static herr_t datum_free_icr(haddr_t addr,
+ size_t len,
+ void * thing);
+
+#define DATUM_ENTRY_TYPE H5AC2_TEST_ID
+
+#define NUMBER_OF_ENTRY_TYPES 1
+
+/* Cache client callback table for the datum entry type.  The parallel
+ * tests use only this single entry type.
+ */
+const H5C2_class_t types[NUMBER_OF_ENTRY_TYPES] =
+{
+  {
+    DATUM_ENTRY_TYPE,
+    "datum",
+    H5FD_MEM_DEFAULT,
+    (H5C2_deserialize_func_t)datum_deserialize,
+    (H5C2_image_len_func_t)datum_image_len,
+    (H5C2_serialize_func_t)datum_serialize,
+    (H5C2_free_icr_func_t)datum_free_icr,
+    (H5C2_clear_dirty_bits_func_t)datum_clear_dirty_bits
+  }
+};
+
+
+/* test utility functions */
+
+void expunge_entry(H5C2_t * cache_ptr, H5F_t * file_ptr, int32_t idx);
+void insert_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t idx, unsigned int flags);
+void local_pin_and_unpin_random_entries(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int min_idx, int max_idx,
+ int min_count, int max_count);
+void local_pin_random_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int min_idx, int max_idx);
+void local_unpin_all_entries(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ hbool_t via_unprotect);
+int local_unpin_next_pinned_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int start_idx, hbool_t via_unprotect);
+void lock_and_unlock_random_entries(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int min_idx, int max_idx,
+ int min_count, int max_count);
+void lock_and_unlock_random_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int min_idx, int max_idx);
+void lock_entry(H5C2_t * cache_ptr, H5F_t * file_ptr, int32_t idx);
+void mark_pinned_entry_dirty(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t idx, hbool_t size_changed, size_t new_size);
+void mark_pinned_or_protected_entry_dirty(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx);
+void pin_entry(H5C2_t * cache_ptr, H5F_t * file_ptr, int32_t idx,
+ hbool_t global, hbool_t dirty);
+void pin_protected_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t idx, hbool_t global);
+void rename_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t old_idx, int32_t new_idx);
+void resize_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t idx, size_t new_size);
+hbool_t setup_cache_for_test(hid_t * fid_ptr, H5F_t ** file_ptr_ptr,
+ H5C2_t ** cache_ptr_ptr);
+void setup_rand(void);
+hbool_t take_down_cache(hid_t fid, H5C2_t * cache_ptr);
+void unlock_entry(H5C2_t * cache_ptr, H5F_t * file_ptr,
+ int32_t type, unsigned int flags);
+void unpin_entry(H5C2_t * cache_ptr, H5F_t * file_ptr, int32_t idx,
+ hbool_t global, hbool_t dirty, hbool_t via_unprotect);
+
+
+/* test functions */
+
+hbool_t server_smoke_check(void);
+hbool_t smoke_check_1(void);
+hbool_t smoke_check_2(void);
+hbool_t smoke_check_3(void);
+hbool_t smoke_check_4(void);
+hbool_t smoke_check_5(void);
+hbool_t trace_file_check(void);
+
+
+/*****************************************************************************/
+/****************************** stats functions ******************************/
+/*****************************************************************************/
+
+/*****************************************************************************
+ *
+ * Function:	print_stats()
+ *
+ * Purpose:	Print the rudimentary stats maintained by t_cache.
+ *
+ *		This is a debugging function, which will not normally
+ *		be run as part of t_cache.
+ *
+ * Return:	void
+ *
+ * Programmer:	JRM -- 4/17/06
+ *
+ * Modifications:
+ *
+ *		None.
+ *
+ *****************************************************************************/
+
+void
+print_stats(void)
+{
+    HDfprintf(stdout,
+              "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n",
+              world_mpi_rank, datum_clears, datum_pinned_clears,
+              datum_destroys );
+    HDfprintf(stdout,
+              "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n",
+              world_mpi_rank, datum_flushes, datum_pinned_flushes,
+              datum_loads );
+    HDfprintf(stdout,
+              "%d: pins: global / global dirty / local = %ld / %ld / %ld\n",
+              world_mpi_rank, global_pins, global_dirty_pins, local_pins);
+    HDfflush(stdout);
+
+    return;
+
+} /* print_stats() */
+
+/*****************************************************************************
+ *
+ * Function:	reset_stats()
+ *
+ * Purpose:	Reset the rudimentary stats maintained by t_cache.
+ *
+ * Return:	void
+ *
+ * Programmer:	JRM -- 4/17/06
+ *
+ * Modifications:
+ *
+ *		None.
+ *
+ *****************************************************************************/
+
+void
+reset_stats(void)
+{
+    datum_clears          = 0;
+    datum_pinned_clears   = 0;
+    datum_destroys        = 0;
+    datum_flushes         = 0;
+    datum_pinned_flushes  = 0;
+    datum_loads           = 0;
+    global_pins           = 0;
+    global_dirty_pins     = 0;
+    local_pins            = 0;
+
+    return;
+
+} /* reset_stats() */
+
+
+/*****************************************************************************/
+/**************************** MPI setup functions ****************************/
+/*****************************************************************************/
+
+/*****************************************************************************
+ *
+ * Function: set_up_file_communicator()
+ *
+ * Purpose: Create the MPI communicator used to open a HDF5 file with.
+ * In passing, also initialize the file_mpi... globals.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 11/16/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+set_up_file_communicator(void)
+{
+    const char * fcn_name = "set_up_file_communicator()";
+    hbool_t success = TRUE;
+    hbool_t have_world_group = FALSE; /* world_group needs MPI_Group_free()? */
+    hbool_t have_file_group = FALSE;  /* file_group needs MPI_Group_free()? */
+    int mpi_result;
+    int num_excluded_ranks;
+    int excluded_ranks[1];
+    MPI_Group file_group;
+    MPI_Group world_group;
+
+    /* extract the group underlying the world communicator */
+    if ( success ) {
+
+        mpi_result = MPI_Comm_group(world_mpi_comm, &world_group);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            nerrors++;
+            success = FALSE;
+            if ( verbose ) {
+                fprintf(stdout,
+                        "%d:%s: MPI_Comm_group() failed with error %d.\n",
+                        world_mpi_rank, fcn_name, mpi_result);
+            }
+        } else {
+
+            have_world_group = TRUE;
+        }
+    }
+
+    /* construct the file group by excluding the server process */
+    if ( success ) {
+
+        num_excluded_ranks = 1;
+        excluded_ranks[0] = world_server_mpi_rank;
+        mpi_result = MPI_Group_excl(world_group, num_excluded_ranks,
+                                    excluded_ranks, &file_group);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            nerrors++;
+            success = FALSE;
+            if ( verbose ) {
+                fprintf(stdout,
+                        "%d:%s: MPI_Group_excl() failed with error %d.\n",
+                        world_mpi_rank, fcn_name, mpi_result);
+            }
+        } else {
+
+            have_file_group = TRUE;
+        }
+    }
+
+    /* create the file communicator proper.  Note that MPI_Comm_create()
+     * returns MPI_COMM_NULL on processes that are not members of
+     * file_group -- i.e. on the server process.
+     */
+    if ( success ) {
+
+        mpi_result = MPI_Comm_create(world_mpi_comm, file_group,
+                                     &file_mpi_comm);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            nerrors++;
+            success = FALSE;
+            if ( verbose ) {
+                fprintf(stdout,
+                        "%d:%s: MPI_Comm_create() failed with error %d.\n",
+                        world_mpi_rank, fcn_name, mpi_result);
+            }
+
+        } else {
+
+            if ( world_mpi_rank != world_server_mpi_rank ) {
+
+                if ( file_mpi_comm == MPI_COMM_NULL ) {
+
+                    nerrors++;
+                    success = FALSE;
+                    if ( verbose ) {
+                        fprintf(stdout,
+                                "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n",
+                                world_mpi_rank, fcn_name);
+                    }
+                }
+            } else {
+
+                file_mpi_size = world_mpi_size - 1; /* needed by the server */
+
+                if ( file_mpi_comm != MPI_COMM_NULL ) {
+
+                    nerrors++;
+                    success = FALSE;
+                    if ( verbose ) {
+                        fprintf(stdout,
+                                "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n",
+                                world_mpi_rank, fcn_name);
+                    }
+                }
+            }
+        }
+    }
+
+    /* query the size and rank of this process in the file communicator --
+     * skipped on the server process, which is not a member.
+     */
+    if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) {
+
+        mpi_result = MPI_Comm_size(file_mpi_comm, &file_mpi_size);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            nerrors++;
+            success = FALSE;
+            if ( verbose ) {
+                fprintf(stdout,
+                        "%d:%s: MPI_Comm_size() failed with error %d.\n",
+                        world_mpi_rank, fcn_name, mpi_result);
+            }
+        }
+    }
+
+    if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) {
+
+        mpi_result = MPI_Comm_rank(file_mpi_comm, &file_mpi_rank);
+
+        if ( mpi_result != MPI_SUCCESS ) {
+
+            nerrors++;
+            success = FALSE;
+            if ( verbose ) {
+                fprintf(stdout,
+                        "%d:%s: MPI_Comm_rank() failed with error %d.\n",
+                        world_mpi_rank, fcn_name, mpi_result);
+            }
+        }
+    }
+
+    /* The groups are no longer needed once file_mpi_comm has been
+     * created (or creation has failed) -- free them so we don't leak
+     * MPI group objects.  The original version of this function
+     * omitted these frees.
+     */
+    if ( have_file_group ) {
+
+        (void)MPI_Group_free(&file_group);
+    }
+
+    if ( have_world_group ) {
+
+        (void)MPI_Group_free(&world_group);
+    }
+
+    return(success);
+
+} /* set_up_file_communicator() */
+
+
+/*****************************************************************************/
+/******************** data array manipulation functions **********************/
+/*****************************************************************************/
+
+/*****************************************************************************
+ *
+ * Function: addr_to_datum_index()
+ *
+ * Purpose: Given the base address of a datum, find and return its index
+ * in the data array.
+ *
+ * Return: Success: index of target datum.
+ *
+ * Failure: -1.
+ *
+ * Programmer: JRM -- 12/20/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+int
+addr_to_datum_index(haddr_t base_addr)
+{
+    /* Binary search the data_index[] array (sorted by base_addr) for
+     * the datum whose base address is exactly base_addr.  Returns the
+     * index of that datum in data[], or -1 if no datum matches.
+     */
+    int lo = 0;
+    int hi = NUM_DATA_ENTRIES - 1;
+
+    while ( lo <= hi )
+    {
+        int mid = lo + ( ( hi - lo ) / 2 );
+        haddr_t mid_addr = data[data_index[mid]].base_addr;
+
+        if ( mid_addr == base_addr ) {
+
+            /* exact match -- report the datum's position in data[] */
+            return(data_index[mid]);
+
+        } else if ( base_addr < mid_addr ) {
+
+            hi = mid - 1;
+
+        } else {
+
+            lo = mid + 1;
+        }
+    }
+
+    return(-1); /* not found */
+
+} /* addr_to_datum_index() */
+
+
+/*****************************************************************************
+ *
+ * Function: init_data()
+ *
+ * Purpose: Initialize the data array, from which cache entries are
+ * loaded.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 12/20/05
+ *
+ * Modifications:
+ *
+ * JRM -- 7/11/06
+ * Added support for the local_len field.
+ *
+ *****************************************************************************/
+
+void
+init_data(void)
+{
+    /* const char * fcn_name = "init_data()"; */
+    /* The set of address offsets is chosen so as to avoid allowing the
+     * base addresses to fall in a pattern that will annoy the hash
+     * table, and to give a good range of entry sizes.
+     *
+     * At present, I am using the first 20 entries of the Fibonacci
+     * sequence multiplied by 2. We will see how it works.
+     */
+    const int num_addr_offsets = 20;
+    /* NOTE(review): 2 * fib(20) is 13530, not 13539 -- the last entry
+     * does not match the "Fibonacci * 2" description above.  Harmless
+     * as long as every process uses the same table, but confirm
+     * whether 13539 is intentional.
+     */
+    const haddr_t addr_offsets[20] = { 2, 2, 4, 6, 10,
+                                       16, 26, 42, 68, 110,
+                                       178, 288, 466, 754, 1220,
+                                       1974, 3194, 5168, 8362, 13539};
+    int i;
+    int j = 0;
+    haddr_t addr = BASE_ADDR;
+
+    /* this must hold so renames don't change entry size. */
+    HDassert( (NUM_DATA_ENTRIES / 2) % 20 == 0 );
+    HDassert( (virt_num_data_entries / 2) % 20 == 0 );
+
+    /* lay the entries out end to end, cycling through the offset table */
+    for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
+    {
+        data[i].base_addr     = addr;
+        data[i].len           = (size_t)(addr_offsets[j]);
+        data[i].local_len     = (size_t)(addr_offsets[j]);
+        data[i].ver           = 0;
+        data[i].dirty         = FALSE;
+        data[i].valid         = FALSE;
+        data[i].locked        = FALSE;
+        data[i].global_pinned = FALSE;
+        data[i].local_pinned  = FALSE;
+        data[i].index         = i;
+
+        /* data[] is built in increasing base_addr order, so the sorted
+         * index starts out as the identity mapping.
+         */
+        data_index[i]         = i;
+
+        addr += addr_offsets[j];
+        HDassert( addr > data[i].base_addr ); /* guard against haddr_t overflow */
+
+        j = (j + 1) % num_addr_offsets;
+    }
+
+    /* save the end of the address space used by the data array */
+    max_addr = addr;
+
+    return;
+
+} /* init_data() */
+
+
+/*****************************************************************************/
+/******************* test coordination related functions *********************/
+/*****************************************************************************/
+
+/*****************************************************************************
+ *
+ * Function: do_express_test()
+ *
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
+ *
+ * Environment variables can be different across different
+ * processes. This function ensures that all processes agree
+ * on whether to do an express test.
+ *
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
+ *
+ * Failure: -1
+ *
+ * Programmer: JRM -- 4/25/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+int
+do_express_test(void)
+{
+    /* Agree on a single express-test level across all processes by
+     * taking the maximum of GetTestExpress() over the world
+     * communicator.  Returns that maximum, or -1 on MPI failure.
+     */
+    const char * fcn_name = "do_express_test()";
+    int local_express;
+    int global_express;
+
+    local_express = GetTestExpress();
+
+    if ( MPI_Allreduce((void *)&local_express,
+                       (void *)&global_express,
+                       1,
+                       MPI_INT,
+                       MPI_MAX,
+                       world_mpi_comm) != MPI_SUCCESS ) {
+
+        nerrors++;
+        global_express = -1; /* flag the failure to the caller */
+        if ( verbose ) {
+            HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
+                      world_mpi_rank, fcn_name );
+        }
+    }
+
+    return(global_express);
+
+} /* do_express_test() */
+
+
+/*****************************************************************************
+ *
+ * Function: do_sync()
+ *
+ * Purpose: Ensure that all messages sent by this process have been
+ * processed before proceeding.
+ *
+ * Do this by exchanging sync req / sync ack messages with
+ * the server.
+ *
+ * Do nothing if nerrors is greater than zero.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 5/10/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+void
+do_sync(void)
+{
+    const char * fcn_name = "do_sync()";
+
+    struct mssg_t mssg;
+
+    /* skip the exchange entirely once any error has been recorded */
+    if ( nerrors <= 0 ) {
+
+        /* compose the sync request message */
+        mssg.req       = SYNC_REQ_CODE;
+        mssg.src       = world_mpi_rank;
+        mssg.dest      = world_server_mpi_rank;
+        mssg.mssg_num  = -1; /* set by send function */
+        mssg.base_addr = 0;
+        mssg.len       = 0;
+        mssg.ver       = 0;
+        mssg.magic     = MSSG_MAGIC;
+
+        if ( ! send_mssg(&mssg, FALSE) ) {
+
+            nerrors++;
+            if ( verbose ) {
+                HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
+                          world_mpi_rank, fcn_name);
+            }
+        }
+    }
+
+    /* wait for the server's ack -- its arrival implies all prior
+     * messages from this process have been processed.  Note that mssg
+     * is reused for the reply.
+     */
+    if ( nerrors <= 0 ) {
+
+        if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
+
+            nerrors++;
+            if ( verbose ) {
+                HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
+                          world_mpi_rank, fcn_name);
+            }
+        } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
+                    ( mssg.src != world_server_mpi_rank ) ||
+                    ( mssg.dest != world_mpi_rank ) ||
+                    ( mssg.magic != MSSG_MAGIC ) ) {
+
+            /* the ack arrived but its contents are inconsistent */
+            nerrors++;
+            if ( verbose ) {
+                HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n",
+                          world_mpi_rank, fcn_name);
+            }
+        }
+    }
+
+    return;
+
+} /* do_sync() */
+
+
+/*****************************************************************************
+ *
+ * Function: get_max_nerrors()
+ *
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors
+ * across all processes. Return this value.
+ *
+ * Return: Success: Maximum of the nerrors global variables across
+ * all processes.
+ *
+ * Failure: -1
+ *
+ * Programmer: JRM -- 1/3/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+int
+get_max_nerrors(void)
+{
+    /* Compute the maximum of the nerrors globals across all processes
+     * via MPI_Allreduce on the world communicator.  Returns that
+     * maximum, or -1 if the reduction itself fails.
+     */
+    const char * fcn_name = "get_max_nerrors()";
+    int global_max;
+    int rc;
+
+    rc = MPI_Allreduce((void *)&nerrors,
+                       (void *)&global_max,
+                       1,
+                       MPI_INT,
+                       MPI_MAX,
+                       world_mpi_comm);
+
+    if ( rc != MPI_SUCCESS ) {
+
+        nerrors++;
+        global_max = -1; /* flag the failure to the caller */
+        if ( verbose ) {
+            HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
+                      world_mpi_rank, fcn_name );
+        }
+    }
+
+    return(global_max);
+
+} /* get_max_nerrors() */
+
+
+/*****************************************************************************/
+/************************ mssg xfer related functions ************************/
+/*****************************************************************************/
+
/*****************************************************************************
 *
 * Function:	recv_mssg()
 *
 * Purpose:	Receive a message from any process into the provided
 *		instance of struct mssg_t.
 *
 *		mssg_tag_offset is added to the base tag (CACHE_TEST_TAG),
 *		allowing the caller to wait for one specific message type:
 *		senders add the request code to the tag when send_mssg()
 *		is called with add_req_to_tag == TRUE, so passing a request
 *		code here matches only messages of that type.  Pass 0 to
 *		match untagged (add_req_to_tag == FALSE) traffic.
 *
 *		Bumps the global nerrors on any failure.
 *
 * Return:	Success:	TRUE
 *
 *		Failure:	FALSE
 *
 * Programmer:	JRM -- 12/22/05
 *
 * Modifications:
 *
 *		JRM -- 5/10/06
 *		Added mssg_tag_offset parameter and supporting code.
 *
 *****************************************************************************/

#define CACHE_TEST_TAG	99 /* different from any used by the library */

hbool_t
recv_mssg(struct mssg_t *mssg_ptr,
          int mssg_tag_offset)
{
    const char * fcn_name = "recv_mssg()";
    hbool_t success = TRUE;
    int mssg_tag = CACHE_TEST_TAG;
    int result;
    MPI_Status status;

    /* the offset must be a valid request code (or zero) -- anything else
     * indicates a caller error.
     */
    if ( ( mssg_ptr == NULL ) ||
         ( mssg_tag_offset < 0 ) ||
         ( mssg_tag_offset> MAX_REQ_CODE ) ) {

        nerrors++;
        success = FALSE;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n",
                      world_mpi_rank, fcn_name);
        }
    } else {

        mssg_tag += mssg_tag_offset;
    }

    if ( success ) {

        result = MPI_Recv((void *)mssg_ptr, 1, mpi_mssg_t, MPI_ANY_SOURCE,
                          mssg_tag, world_mpi_comm, &status);

        if ( result != MPI_SUCCESS ) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n",
                          world_mpi_rank, fcn_name );
            }
        } else if ( mssg_ptr->magic != MSSG_MAGIC ) {

            /* reject anything that doesn't carry the test bed magic */
            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank,
                          fcn_name);
            }
        } else if ( mssg_ptr->src != status.MPI_SOURCE ) {

            /* the src field inside the message must agree with the actual
             * sending rank reported by MPI.
             */
            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout,
                          "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }

    return(success);

} /* recv_mssg() */
+
+
/*****************************************************************************
 *
 * Function:	send_mssg()
 *
 * Purpose:	Send the provided instance of mssg to the indicated target.
 *
 *		Note that all source and destination ranks are in the
 *		global communicator.
 *
 *		If add_req_to_tag is TRUE, the request code is added to
 *		the MPI tag, allowing the receiver to select on message
 *		type (see recv_mssg()).
 *
 *		Bumps the global nerrors on any failure.
 *
 * Return:	Success:	TRUE
 *
 *		Failure:	FALSE
 *
 * Programmer:	JRM -- 12/22/05
 *
 * Modifications:
 *
 *		JRM -- 5/10/06
 *		Added the add_req_to_tag parameter and supporting code.
 *
 *****************************************************************************/

hbool_t
send_mssg(struct mssg_t *mssg_ptr,
          hbool_t add_req_to_tag)
{
    const char * fcn_name = "send_mssg()";
    hbool_t success = TRUE;
    int mssg_tag = CACHE_TEST_TAG;
    int result;
    static long mssg_num = 0;	/* per-process monotonically increasing
				 * sequence number, shared by all sends
				 * from this process.
				 */

    /* sanity check the outgoing message -- note that sending to self is
     * forbidden.
     */
    if ( ( mssg_ptr == NULL ) ||
         ( mssg_ptr->src != world_mpi_rank ) ||
         ( mssg_ptr->dest < 0 ) ||
         ( mssg_ptr->dest == mssg_ptr->src ) ||
         ( mssg_ptr->dest >= world_mpi_size ) ||
         ( mssg_ptr->req < 0 ) ||
         ( mssg_ptr->req > MAX_REQ_CODE ) ||
         ( mssg_ptr->magic != MSSG_MAGIC ) ) {

        nerrors++;
        success = FALSE;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n",
                      world_mpi_rank, fcn_name);
        }
    }

    if ( success ) {

        mssg_ptr->mssg_num = mssg_num++;

        if ( add_req_to_tag ) {

            mssg_tag += mssg_ptr->req;
        }

        result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t,
                          mssg_ptr->dest, mssg_tag, world_mpi_comm);

        if ( result != MPI_SUCCESS ) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }

    return(success);

} /* send_mssg() */
+
+
/*****************************************************************************
 *
 * Function:	setup_derived_types()
 *
 * Purpose:	Set up the derived types used by the test bed.  At present,
 *		only the mpi_mssg derived type is needed.  The type must
 *		mirror the layout of struct mssg_t field for field.
 *
 *		NOTE(review): MPI_Address() and MPI_Type_struct() are
 *		deprecated MPI-1 names (MPI_Get_address() /
 *		MPI_Type_create_struct() are the MPI-2 replacements) --
 *		presumably retained here for old-MPI portability; confirm
 *		before modernizing.
 *
 * Return:	Success:	TRUE
 *
 *		Failure:	FALSE
 *
 * Programmer:	JRM -- 12/22/05
 *
 * Modifications:
 *
 *		None.
 *
 *****************************************************************************/

hbool_t
setup_derived_types(void)
{
    const char * fcn_name = "setup_derived_types()";
    hbool_t success = TRUE;
    int i;
    int result;
    /* one entry per field of struct mssg_t, in declaration order */
    MPI_Datatype mpi_types[8] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG,
                                 HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT,
                                 MPI_UNSIGNED};
    int block_len[8] = {1, 1, 1, 1, 1, 1, 1, 1};
    MPI_Aint displs[8];
    struct mssg_t sample; /* used to compute displacements */

    /* setup the displacements array */
    if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
         ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[7]) ) ) {

        nerrors++;
        success = FALSE;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
                      world_mpi_rank, fcn_name);
        }

    } else {

        /* Now calculate the actual displacements -- i.e. make each
         * displacement relative to the address of the first field.
         * Iterate downward so displs[0] is adjusted (to zero) last.
         */
        for ( i = 7; i >= 0; --i)
        {
            displs[i] -= displs[0];
        }
    }

    if ( success ) {

        result = MPI_Type_struct(8, block_len, displs, mpi_types, &mpi_mssg_t);

        if ( result != MPI_SUCCESS ) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }

    if ( success ) {

        /* the type must be committed before it can be used in
         * communication.
         */
        result = MPI_Type_commit(&mpi_mssg_t);

        if ( result != MPI_SUCCESS) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }

    return(success);

} /* setup_derived_types */
+
+
+/*****************************************************************************
+ *
+ * Function: takedown_derived_types()
+ *
+ * Purpose: take down the derived types used by the test bed. At present,
+ * only the mpi_mssg derived type is needed.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 12/22/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+takedown_derived_types(void)
+{
+ const char * fcn_name = "takedown_derived_types()";
+ hbool_t success = TRUE;
+ int result;
+
+ result = MPI_Type_free(&mpi_mssg_t);
+
+ if ( result != MPI_SUCCESS ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ return(success);
+
+} /* takedown_derived_types() */
+
+
+/*****************************************************************************/
+/***************************** server functions ******************************/
+/*****************************************************************************/
+
/*****************************************************************************
 *
 * Function:	server_main()
 *
 * Purpose:	Main function for the server process.  This process exists
 *		to provide an independant view of the data array.
 *
 *		The function loops receiving requests (with tag offset 0,
 *		i.e. untagged traffic) and dispatching them to the
 *		appropriate serve_*() routine, until the count of done
 *		messages received equals the number of client processes.
 *
 *		Receiving a reply/ack code (which only the server sends)
 *		is treated as a fatal protocol error.
 *
 * Return:	Success:	TRUE
 *
 *		Failure:	FALSE
 *
 * Programmer:	JRM -- 12/22/05
 *
 * Modifications:
 *
 *		JRM -- 5/10/06
 *		Updated for sync message.
 *
 *****************************************************************************/

hbool_t
server_main(void)
{
    const char * fcn_name = "server_main()";
    hbool_t done = FALSE;
    hbool_t success = TRUE;
    int done_count = 0;		/* number of DONE_REQ_CODE mssgs received */
    struct mssg_t mssg;

    /* this function must only be run on the designated server rank */
    if ( world_mpi_rank != world_server_mpi_rank ) {

        nerrors++;
        success = FALSE;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n",
                      world_mpi_rank, fcn_name);
        }
    }


    while ( ( success ) && ( ! done ) )
    {
        success = recv_mssg(&mssg, 0);

        if ( success ) {

            switch ( mssg.req )
            {
		case WRITE_REQ_CODE:
		    success = serve_write_request(&mssg);
		    break;

		case WRITE_REQ_ACK_CODE:
                    /* only the server sends acks -- protocol error */
		    success = FALSE;
		    HDfprintf(stdout, "%s: Received write ack?!?.\n", fcn_name);
		    break;

		case READ_REQ_CODE:
                    success = serve_read_request(&mssg);
		    break;

		case READ_REQ_REPLY_CODE:
		    success = FALSE;
		    HDfprintf(stdout, "%s: Received read req reply?!?.\n",
			      fcn_name);
		    break;

		case SYNC_REQ_CODE:
                    success = serve_sync_request(&mssg);
		    break;

		case SYNC_ACK_CODE:
		    success = FALSE;
		    HDfprintf(stdout, "%s: Received sync ack?!?.\n",
			      fcn_name);
		    break;

		case DONE_REQ_CODE:
		    done_count++;
                    /* exit once every client has reported done --
                     * file_mpi_size is presumably the number of client
                     * processes (TODO confirm against the communicator
                     * setup code).
                     */
		    /* HDfprintf(stdout, "%d:%s: done_count = %d.\n",
                               world_mpi_rank, fcn_name, done_count); */
		    if ( done_count >= file_mpi_size ) {

			done = TRUE;
		    }
		    break;

		default:
                    nerrors++;
                    success = FALSE;
                    if ( verbose ) {
		        HDfprintf(stdout, "%d:%s: Unknown request code.\n",
                                  world_mpi_rank, fcn_name);
                    }
		    break;
            }
        }
    }

    return(success);

} /* server_main() */
+
+
+/*****************************************************************************
+ *
+ * Function: serve_read_request()
+ *
+ * Purpose: Serve a read request.
+ *
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * a copy of the indicated datum from the data array to
+ * the requesting process.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 12/22/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+serve_read_request(struct mssg_t * mssg_ptr)
+{
+ const char * fcn_name = "serve_read_request()";
+ hbool_t success = TRUE;
+ int target_index;
+ haddr_t target_addr;
+ struct mssg_t reply;
+
+ if ( ( mssg_ptr == NULL ) ||
+ ( mssg_ptr->req != READ_REQ_CODE ) ||
+ ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( success ) {
+
+ target_addr = mssg_ptr->base_addr;
+ target_index = addr_to_datum_index(target_addr);
+
+ if ( target_index < 0 ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n",
+ world_mpi_rank, fcn_name, target_addr);
+ }
+ } else if ( data[target_index].len != mssg_ptr->len ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: data[i].len = %d != mssg->len = %d.\n",
+ world_mpi_rank, fcn_name,
+ data[target_index].len, mssg_ptr->len);
+ }
+ } else if ( ! (data[target_index].valid) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: proc %d read invalid entry. idx/base_addr = %d/%a.\n",
+ world_mpi_rank, fcn_name,
+ mssg_ptr->src, target_index,
+ target_index,
+ data[target_index].base_addr);
+ }
+ } else {
+
+ /* compose the reply message */
+ reply.req = READ_REQ_REPLY_CODE;
+ reply.src = world_mpi_rank;
+ reply.dest = mssg_ptr->src;
+ reply.mssg_num = -1; /* set by send function */
+ reply.base_addr = data[target_index].base_addr;
+ reply.len = data[target_index].len;
+ reply.ver = data[target_index].ver;
+ reply.magic = MSSG_MAGIC;
+ }
+ }
+
+ if ( success ) {
+
+ success = send_mssg(&reply, TRUE);
+ }
+
+ return(success);
+
+} /* serve_read_request() */
+
+
+/*****************************************************************************
+ *
+ * Function: serve_sync_request()
+ *
+ * Purpose: Serve a sync request.
+ *
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends a
+ * sync ack to the requesting process.
+ *
+ * This service exist to allow the sending process to ensure
+ * that all previous messages have been processed before
+ * proceeding.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 5/10/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+serve_sync_request(struct mssg_t * mssg_ptr)
+{
+ const char * fcn_name = "serve_sync_request()";
+ hbool_t success = TRUE;
+ struct mssg_t reply;
+
+ if ( ( mssg_ptr == NULL ) ||
+ ( mssg_ptr->req != SYNC_REQ_CODE ) ||
+ ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( success ) {
+
+ /* compose the reply message */
+ reply.req = SYNC_ACK_CODE;
+ reply.src = world_mpi_rank;
+ reply.dest = mssg_ptr->src;
+ reply.mssg_num = -1; /* set by send function */
+ reply.base_addr = 0;
+ reply.len = 0;
+ reply.ver = 0;
+ reply.magic = MSSG_MAGIC;
+ }
+
+ if ( success ) {
+
+ success = send_mssg(&reply, TRUE);
+ }
+
+ return(success);
+
+} /* serve_sync_request() */
+
+
/*****************************************************************************
 *
 * Function:	serve_write_request()
 *
 * Purpose:	Serve a write request.
 *
 *		The function accepts a pointer to an instance of struct
 *		mssg_t as input.  If all sanity checks pass, it updates
 *		the version number of the target data array entry as
 *		specified in the message, and marks the entry valid.
 *
 *		Version numbers must strictly increase -- a write that
 *		does not advance the version is flagged as an error.
 *
 * Return:	Success:	TRUE
 *
 *		Failure:	FALSE
 *
 * Programmer:	JRM -- 12/21/05
 *
 * Modifications:
 *
 *		JRM -- 5/9/06
 *		Added code supporting a write ack message.  This is a
 *		speculative fix to a bug observed on Cobalt.  If it
 *		doesn't work, it will help narrow down the possibilities.
 *
 *****************************************************************************/

hbool_t
serve_write_request(struct mssg_t * mssg_ptr)
{
    const char * fcn_name = "serve_write_request()";
    hbool_t success = TRUE;
    int target_index;
    int new_ver_num;
    haddr_t target_addr;
#if DO_WRITE_REQ_ACK
    struct mssg_t reply;
#endif /* DO_WRITE_REQ_ACK */

    /* sanity check the request */
    if ( ( mssg_ptr == NULL ) ||
         ( mssg_ptr->req != WRITE_REQ_CODE ) ||
         ( mssg_ptr->magic != MSSG_MAGIC ) ) {

        nerrors++;
        success = FALSE;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
                      world_mpi_rank, fcn_name);
        }
    }

    if ( success ) {

        target_addr = mssg_ptr->base_addr;
        target_index = addr_to_datum_index(target_addr);

        if ( target_index < 0 ) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n",
                          world_mpi_rank, fcn_name, target_addr);
            }
        } else if ( data[target_index].len != mssg_ptr->len ) {

            /* length in the request must match the target entry */
            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout,
                          "%d:%s: data[i].len = %d != mssg->len = %d.\n",
                          world_mpi_rank, fcn_name,
                          data[target_index].len, mssg_ptr->len);
            }
        }
    }

    if ( success ) {

        /* versions must strictly increase -- equal or smaller indicates
         * a stale or duplicate write.  Note that target_index is only
         * used here if the lookup above succeeded.
         */
        new_ver_num = mssg_ptr->ver;

        if ( new_ver_num <= data[target_index].ver ) {

            nerrors++;
            success = FALSE;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n",
                          world_mpi_rank, fcn_name,
                          new_ver_num, data[target_index].ver);
            }
        }
    }

    if ( success ) {

        /* process the write */
        data[target_index].ver = new_ver_num;
        data[target_index].valid = TRUE;

#if DO_WRITE_REQ_ACK

        /* compose the reply message */
        reply.req       = WRITE_REQ_ACK_CODE;
        reply.src       = world_mpi_rank;
        reply.dest      = mssg_ptr->src;
        reply.mssg_num  = -1; /* set by send function */
        reply.base_addr = data[target_index].base_addr;
        reply.len       = data[target_index].len;
        reply.ver       = data[target_index].ver;
        reply.magic     = MSSG_MAGIC;

        /* and send it */
        success = send_mssg(&reply, TRUE);

#endif /* DO_WRITE_REQ_ACK */

    }

    return(success);

} /* serve_write_request() */
+
+
+/*****************************************************************************/
+/**************************** Call back functions ****************************/
+/*****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: datum_clear_dirty_bits
+ *
+ * Purpose: Clear the dirty bits of the target entry.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 10/30/07
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+datum_clear_dirty_bits(UNUSED haddr_t addr,
+ UNUSED size_t len,
+ void * thing)
+{
+ int idx;
+ struct datum * entry_ptr;
+
+ HDassert( thing );
+
+ entry_ptr = (struct datum *)thing;
+
+ idx = addr_to_datum_index(entry_ptr->base_addr);
+
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: clear_dirty_bits() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len);
+ fflush(stdout);
+ }
+
+ HDassert( idx >= 0 );
+ HDassert( idx < NUM_DATA_ENTRIES );
+ HDassert( idx < virt_num_data_entries );
+ HDassert( &(data[idx]) == entry_ptr );
+
+ HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+
+ entry_ptr->dirty = FALSE;
+
+ datum_clears++;
+
+ if ( entry_ptr->header.is_pinned ) {
+
+ datum_pinned_clears++;
+ HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned );
+ }
+
+ return(SUCCEED);
+
+} /* datum_clear_dirty_bits() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: datum_deserialize
+ *
+ * Purpose: deserialize the entry.
+ *
+ * Return: void * (pointer to the in core representation of the entry)
+ *
+ * Programmer: John Mainzer
+ * 9/20/07
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void *
+datum_deserialize(haddr_t addr,
+ UNUSED size_t len,
+ const void * image_ptr,
+ const UNUSED void * udata_ptr,
+ hbool_t * dirty_ptr)
+{
+ const char * fcn_name = "load_datum()";
+ hbool_t success = TRUE;
+ int idx;
+ struct datum * entry_ptr = NULL;
+ struct mssg_t mssg;
+
+ HDassert( image_ptr != NULL );
+
+ idx = addr_to_datum_index(addr);
+
+ HDassert( idx >= 0 );
+ HDassert( idx < NUM_DATA_ENTRIES );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert( addr == entry_ptr->base_addr );
+ HDassert( ! entry_ptr->global_pinned );
+ HDassert( ! entry_ptr->local_pinned );
+
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len,
+ (int)(entry_ptr->header.is_dirty));
+ fflush(stdout);
+ }
+
+ /* compose the read message */
+ mssg.req = READ_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = entry_ptr->base_addr;
+ mssg.len = entry_ptr->len;
+ mssg.ver = 0; /* bogus -- should be corrected by server */
+ mssg.magic = MSSG_MAGIC;
+
+ if ( ! send_mssg(&mssg, FALSE) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( success ) {
+
+ if ( ! recv_mssg(&mssg, READ_REQ_REPLY_CODE) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ if ( success ) {
+
+ if ( ( mssg.req != READ_REQ_REPLY_CODE ) ||
+ ( mssg.src != world_server_mpi_rank ) ||
+ ( mssg.dest != world_mpi_rank ) ||
+ ( mssg.base_addr != entry_ptr->base_addr ) ||
+ ( mssg.len != entry_ptr->len ) ||
+ ( mssg.ver < entry_ptr->ver ) ||
+ ( mssg.magic != MSSG_MAGIC ) ) {
+
+ nerrors++;
+ success = FALSE;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n",
+ world_mpi_rank, fcn_name);
+ }
+#if 0 /* This has been useful debugging code -- keep it for now. */
+ if ( mssg.req != READ_REQ_REPLY_CODE ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n",
+ world_mpi_rank, fcn_name);
+ HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
+ world_mpi_rank, fcn_name, (int)(mssg.req));
+ }
+
+ if ( mssg.src != world_server_mpi_rank ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.src != world_server_mpi_rank.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ if ( mssg.dest != world_mpi_rank ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.dest != world_mpi_rank.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ if ( mssg.base_addr != entry_ptr->base_addr ) {
+
+ HDfprintf(stdout,
+ "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
+ world_mpi_rank, fcn_name);
+ HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
+ world_mpi_rank, fcn_name, mssg.base_addr);
+ HDfprintf(stdout, "%d:%s: entry_ptr->base_addr = %a.\n",
+ world_mpi_rank, fcn_name, entry_ptr->base_addr);
+ }
+
+ if ( mssg.len != entry_ptr->len ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.len != entry_ptr->len.\n",
+ world_mpi_rank, fcn_name);
+ HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
+ world_mpi_rank, fcn_name, mssg.len);
+ }
+
+ if ( mssg.ver < entry_ptr->ver ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.ver < entry_ptr->ver.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ if ( mssg.magic != MSSG_MAGIC ) {
+
+ HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
+ world_mpi_rank, fcn_name);
+ }
+#endif /* JRM */
+ } else {
+
+ entry_ptr->ver = mssg.ver;
+ entry_ptr->dirty = FALSE;
+ *dirty_ptr = FALSE;
+ }
+ }
+
+ if ( ! success ) {
+
+ entry_ptr = NULL;
+
+ }
+
+ datum_loads++;
+
+ return(entry_ptr);
+
+} /* deserialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: datum_image_len
+ *
+ * Purpose: Return the real (and possibly reduced) length of the image.
+ * The helper functions verify that the correct version of
+ * deserialize is being called, and then call deserialize
+ * proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 9/19/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+datum_image_len(void *thing,
+ size_t *image_len_ptr)
+{
+
+
+ int idx;
+ struct datum * entry_ptr;
+
+ HDassert( thing );
+ HDassert( image_len_ptr );
+
+ entry_ptr = (struct datum *)thing;
+
+ idx = addr_to_datum_index(entry_ptr->base_addr);
+
+ HDassert( idx >= 0 );
+ HDassert( idx < NUM_DATA_ENTRIES );
+ HDassert( idx < virt_num_data_entries );
+ HDassert( &(data[idx]) == entry_ptr );
+ HDassert( entry_ptr->local_len > 0 );
+ HDassert( entry_ptr->local_len <= entry_ptr->len );
+
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)(entry_ptr->base_addr),
+ (int)(entry_ptr->local_len));
+ fflush(stdout);
+ }
+
+ HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+
+ *image_len_ptr = entry_ptr->local_len;
+
+ return(SUCCEED);
+
+} /* datum_image_len() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: datum_serialize
+ *
+ * Purpose: Serialize the supplied entry.
+ *
+ * Return: SUCCEED if successful, FAIL otherwise.
+ *
+ * Programmer: John Mainzer
+ * 10/30/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+datum_serialize(UNUSED haddr_t addr,
+ UNUSED size_t len,
+ void * image_ptr,
+ void * thing,
+ unsigned * flags_ptr,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ void ** new_image_ptr_ptr)
+{
+
+ const char * fcn_name = "datum_serialize()";
+ herr_t ret_value = SUCCEED;
+ int idx;
+ struct datum * entry_ptr;
+ struct mssg_t mssg;
+
+ HDassert( thing );
+ HDassert( image_ptr );
+ HDassert( flags_ptr );
+
+ *flags_ptr = 0;
+
+ HDassert( new_addr_ptr );
+ HDassert( new_len_ptr );
+ HDassert( new_image_ptr_ptr );
+
+ entry_ptr = (struct datum *)thing;
+
+ idx = addr_to_datum_index(entry_ptr->base_addr);
+
+ HDassert( idx >= 0 );
+ HDassert( idx < NUM_DATA_ENTRIES );
+ HDassert( idx < virt_num_data_entries );
+ HDassert( &(data[idx]) == entry_ptr );
+
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len);
+ fflush(stdout);
+ }
+
+ HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+
+ HDassert( entry_ptr->header.is_dirty == entry_ptr->dirty );
+
+ if ( ( file_mpi_rank != 0 ) && ( entry_ptr->dirty ) ) {
+
+ ret_value = FAIL;
+ HDfprintf(stdout,
+ "%d:%s: Flushed dirty entry from non-zero file process.",
+ world_mpi_rank, fcn_name);
+ }
+
+ if ( ret_value == SUCCEED ) {
+
+ if ( entry_ptr->header.is_dirty ) {
+
+ /* compose the message */
+ mssg.req = WRITE_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = entry_ptr->base_addr;
+ mssg.len = entry_ptr->len;
+ mssg.ver = entry_ptr->ver;
+ mssg.magic = MSSG_MAGIC;
+
+ if ( ! send_mssg(&mssg, FALSE) ) {
+
+ nerrors++;
+ ret_value = FAIL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ else
+ {
+ entry_ptr->header.is_dirty = FALSE;
+ entry_ptr->dirty = FALSE;
+ }
+ }
+ }
+
+#if DO_WRITE_REQ_ACK
+
+ if ( ( ret_value == SUCCEED ) && ( entry_ptr->header.is_dirty ) ) {
+
+ if ( ! recv_mssg(&mssg, WRITE_REQ_ACK_CODE) ) {
+
+ nerrors++;
+ ret_value = FAIL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else if ( ( mssg.req != WRITE_REQ_ACK_CODE ) ||
+ ( mssg.src != world_server_mpi_rank ) ||
+ ( mssg.dest != world_mpi_rank ) ||
+ ( mssg.base_addr != entry_ptr->base_addr ) ||
+ ( mssg.len != entry_ptr->len ) ||
+ ( mssg.ver != entry_ptr->ver ) ||
+ ( mssg.magic != MSSG_MAGIC ) ) {
+
+ nerrors++;
+ ret_value = FAIL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+#endif /* DO_WRITE_REQ_ACK */
+
+ datum_flushes++;
+
+ if ( entry_ptr->header.is_pinned ) {
+
+ datum_pinned_flushes++;
+ HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned );
+ }
+
+ return(ret_value);
+
+} /* datum_serialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: datum_free_icr
+ *
+ * Purpose: Nominally, this callback is supposed to free the
+ * in core representation of the entry.
+ *
+ * In the context of this test bed, we use it to do
+ * do all the processing we used to do on a destroy.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 9/19/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+datum_free_icr(UNUSED haddr_t addr,
+ UNUSED size_t len,
+ void * thing)
+{
+ int idx;
+ struct datum * entry_ptr;
+
+ HDassert( thing );
+
+ entry_ptr = (struct datum *)thing;
+
+ idx = addr_to_datum_index(entry_ptr->base_addr);
+
+ HDassert( idx >= 0 );
+ HDassert( idx < NUM_DATA_ENTRIES );
+ HDassert( idx < virt_num_data_entries );
+ HDassert( &(data[idx]) == entry_ptr );
+
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: free_icr() idx = %d, addr = %ld, len = %d, dirty = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len,
+ (int)(entry_ptr->dirty));
+ fflush(stdout);
+ }
+
+ HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+
+ HDassert( !(entry_ptr->dirty) );
+ HDassert( !(entry_ptr->header.is_dirty) );
+ HDassert( !(entry_ptr->global_pinned) );
+ HDassert( !(entry_ptr->local_pinned) );
+ HDassert( !(entry_ptr->header.is_pinned) );
+
+ datum_destroys++;
+
+ return(SUCCEED);
+
+} /* datum_free_icr() */
+
+
+/*****************************************************************************/
+/************************** test utility functions ***************************/
+/*****************************************************************************/
+
/*****************************************************************************
 * Function:    expunge_entry()
 *
 * Purpose:     Expunge the entry indicated by the type and index, mark it
 *		as clean, and don't increment its version number.  After
 *		the expunge, verify that the entry is no longer in the
 *		cache.
 *
 *		Do nothing if nerrors is non-zero on entry.
 *
 * Return:      void
 *
 * Programmer:  John Mainzer
 *              07/11/06
 *
 * Modifications:
 *
 *		None.
 *
 *****************************************************************************/

void
expunge_entry(H5C2_t * cache_ptr,
              H5F_t * file_ptr,
              int32_t idx)
{
    const char * fcn_name = "expunge_entry()";
    hbool_t in_cache;
    herr_t result;
    struct datum * entry_ptr;

    HDassert( cache_ptr );
    HDassert( file_ptr );
    HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
    HDassert( idx < virt_num_data_entries );

    entry_ptr = &(data[idx]);

    /* only an unprotected, unpinned entry may be expunged */
    HDassert( !(entry_ptr->locked) );
    HDassert( !(entry_ptr->global_pinned) );
    HDassert( !(entry_ptr->local_pinned) );

    entry_ptr->dirty = FALSE;

    if ( nerrors == 0 ) {

        /* NOTE(review): the -1 is presumably a default/bogus dxpl id --
         * confirm against the H5AC2_expunge_entry() signature.
         */
        result = H5AC2_expunge_entry(file_ptr, -1, &(types[0]),
			             entry_ptr->header.addr);

        if ( result < 0 ) {

            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: Error in H5AC2_expunge_entry().\n",
	                  world_mpi_rank, fcn_name);
            }
        }

        HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
        HDassert( ! ((entry_ptr->header).is_dirty) );

        /* verify that the entry is really gone from the cache */
	result = H5C2_get_entry_status(cache_ptr, entry_ptr->base_addr,
			               NULL, &in_cache, NULL, NULL, NULL);

	if ( result < 0 ) {

            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: Error in H5C2_get_entry_status().\n",
	                  world_mpi_rank, fcn_name);
            }
        } else if ( in_cache ) {

            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
	                  world_mpi_rank, fcn_name);
            }
	}
    }

    return;

} /* expunge_entry() */
+
+
/*****************************************************************************
 * Function:    insert_entry()
 *
 * Purpose:     Insert the entry indicated by the type and index, mark it
 *		as dirty, and increment its version number.
 *
 *		Do nothing if nerrors is non-zero on entry.
 *
 * Return:      void
 *
 * Programmer:  John Mainzer
 *              01/04/06
 *
 * Modifications:
 *
 *		JRM -- 8/11/06
 *		Updated code to reflect the fact that entries can now be
 *		inserted pinned.  Note that since all inserts are dirty,
 *		any pins must be global pins.
 *
 *****************************************************************************/

void
insert_entry(H5C2_t * cache_ptr,
             H5F_t * file_ptr,
             int32_t idx,
             unsigned int flags)
{
    const char * fcn_name = "insert_entry()";
    hbool_t insert_pinned;
    herr_t result;
    struct datum * entry_ptr;

    HDassert( cache_ptr );
    HDassert( file_ptr );
    HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
    HDassert( idx < virt_num_data_entries );

    entry_ptr = &(data[idx]);

    HDassert( !(entry_ptr->locked) );

    insert_pinned = ((flags & H5C2__PIN_ENTRY_FLAG) != 0 );

    if ( nerrors == 0 ) {

        /* all inserts are dirty -- bump the version accordingly */
        (entry_ptr->ver)++;
        entry_ptr->dirty = TRUE;

        result = H5AC2_set(file_ptr, H5P_DATASET_XFER_DEFAULT, &(types[0]),
                           entry_ptr->base_addr, entry_ptr->local_len,
                           (void *)(&(entry_ptr->header)), flags);

        if ( ( result < 0 ) ||
             ( entry_ptr->header.type != &(types[0]) ) ||
             ( entry_ptr->len != entry_ptr->header.size ) ||
             ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {

            nerrors++;
            if ( verbose ) {
	        HDfprintf(stdout, "%d:%s: Error in H5AC2_set().\n",
	                  world_mpi_rank, fcn_name);
            }
        }

        if ( ! (entry_ptr->header.is_dirty) ) {

	    /* it is possible that we just exceeded the dirty bytes
	     * threshold, triggering a write of the newly inserted
	     * entry.  Test for this, and only flag an error if this
	     * is not the case.
	     */

	    struct H5AC2_aux_t * aux_ptr;

	    aux_ptr = ((H5AC2_aux_t *)(cache_ptr->aux_ptr));

            /* a zero dirty_bytes count (together with a valid aux
             * structure) indicates the threshold flush just happened.
             */
	    if ( ! ( ( aux_ptr != NULL ) &&
	             ( aux_ptr->magic == H5AC2__H5AC2_AUX_T_MAGIC ) &&
	             ( aux_ptr->dirty_bytes == 0 ) ) ) {

                nerrors++;
                if ( verbose ) {
	            HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
		              world_mpi_rank, fcn_name, idx,
		              (int)(data[idx].header.is_dirty));
		}
            }
        }

        if ( insert_pinned ) {

            HDassert( entry_ptr->header.is_pinned );
            entry_ptr->global_pinned = TRUE;
            global_pins++;

        } else {

            HDassert( ! ( entry_ptr->header.is_pinned ) );
            entry_ptr->global_pinned = FALSE;

        }

        /* HDassert( entry_ptr->header.is_dirty ); */
        HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
    }

    return;

} /* insert_entry() */
+
+
+/*****************************************************************************
+ * Function: local_pin_and_unpin_random_entries()
+ *
+ * Purpose: Pin a random number of randomly selected entries in cache, and
+ * then unpin a random number of entries.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/12/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+local_pin_and_unpin_random_entries(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int min_idx,
+ int max_idx,
+ int min_count,
+ int max_count)
+{
+ /* const char * fcn_name = "local_pin_and_unpin_random_entries()"; */
+
+ if ( nerrors == 0 ) {
+
+ hbool_t use_unprotect;
+ int n_ops;
+ int op;
+ int cursor;
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( 0 <= min_idx );
+ HDassert( min_idx < max_idx );
+ HDassert( max_idx < NUM_DATA_ENTRIES );
+ HDassert( max_idx < virt_num_data_entries );
+ HDassert( 0 <= min_count );
+ HDassert( min_count < max_count );
+
+ /* first pass: locally pin a random number of random entries */
+ n_ops = min_count + (HDrand() % (max_count - min_count));
+
+ HDassert( min_count <= n_ops );
+ HDassert( n_ops <= max_count );
+
+ for ( op = 0; op < n_ops; op++ ) {
+
+ local_pin_random_entry(cache_ptr, file_ptr, min_idx, max_idx);
+ }
+
+ /* second pass: unpin up to a freshly drawn random number of
+ * locally pinned entries, alternating between unpin via
+ * unprotect and a direct unpin. Stop early if the scan runs
+ * out of locally pinned entries (cursor goes negative).
+ */
+ n_ops = min_count + (HDrand() % (max_count - min_count));
+
+ HDassert( min_count <= n_ops );
+ HDassert( n_ops <= max_count );
+
+ op = 0;
+ cursor = 0;
+
+ while ( ( op < n_ops ) && ( cursor >= 0 ) ) {
+
+ use_unprotect = ( (((unsigned)op) & 0x0001) == 0 );
+ cursor = local_unpin_next_pinned_entry(cache_ptr, file_ptr,
+ cursor, use_unprotect);
+ op++;
+ }
+ }
+
+ return;
+
+} /* local_pin_and_unpin_random_entries() */
+
+
+/*****************************************************************************
+ * Function: local_pin_random_entry()
+ *
+ * Purpose: Pin a randomly selected entry in cache, and mark the entry
+ * as being locally pinned. Since this entry will not in
+ * general be pinned in any other cache, we can't mark it
+ * dirty.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/12/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+local_pin_random_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int min_idx,
+ int max_idx)
+{
+ /* const char * fcn_name = "local_pin_random_entry()"; */
+ int candidate;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( 0 <= min_idx );
+ HDassert( min_idx < max_idx );
+ HDassert( max_idx < NUM_DATA_ENTRIES );
+ HDassert( max_idx < virt_num_data_entries );
+
+ /* draw candidates until we find an entry that is not already
+ * pinned -- either globally or locally.
+ */
+ do {
+ candidate = min_idx + (HDrand() % (max_idx - min_idx));
+ HDassert( min_idx <= candidate );
+ HDassert( candidate <= max_idx );
+ } while ( data[candidate].global_pinned ||
+ data[candidate].local_pinned );
+
+ /* local pin => not global, not dirty */
+ pin_entry(cache_ptr, file_ptr, candidate, FALSE, FALSE);
+ }
+
+ return;
+
+} /* local_pin_random_entry() */
+
+
+/*****************************************************************************
+ * Function: local_unpin_all_entries()
+ *
+ * Purpose: Unpin all local pinned entries.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/12/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+local_unpin_all_entries(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ hbool_t via_unprotect)
+{
+ /* const char * fcn_name = "local_unpin_all_entries()"; */
+
+ if ( nerrors == 0 ) {
+
+ int cursor = 0;
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+
+ /* local_unpin_next_pinned_entry() returns -1 once no locally
+ * pinned entry remains -- keep calling until it does.
+ */
+ do {
+ cursor = local_unpin_next_pinned_entry(cache_ptr, file_ptr,
+ cursor, via_unprotect);
+ } while ( cursor >= 0 );
+ }
+
+ return;
+
+} /* local_unpin_all_entries() */
+
+
+/*****************************************************************************
+ * Function: local_unpin_next_pinned_entry()
+ *
+ * Purpose: Find the next locally pinned entry after the specified
+ * starting point, and unpin it.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: Index of the unpinned entry if there is one, or -1 if
+ * nerrors is non-zero on entry, or if there is no locally
+ * pinned entry.
+ *
+ * Programmer: John Mainzer
+ * 4/12/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+int
+local_unpin_next_pinned_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int start_idx,
+ hbool_t via_unprotect)
+{
+ /* const char * fcn_name = "local_unpin_next_pinned_entry()"; */
+ int i = 0;
+ int idx = -1;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( 0 <= start_idx );
+ HDassert( start_idx < NUM_DATA_ENTRIES );
+ HDassert( start_idx < virt_num_data_entries );
+
+ idx = start_idx;
+
+ /* scan at most virt_num_data_entries slots starting at
+ * start_idx, wrapping around to index 0, looking for a
+ * locally pinned entry.
+ */
+ while ( ( i < virt_num_data_entries ) &&
+ ( ! ( data[idx].local_pinned ) ) )
+ {
+ i++;
+ idx++;
+ if ( idx >= virt_num_data_entries ) {
+ idx = 0;
+ }
+ }
+
+ if ( data[idx].local_pinned ) {
+
+ /* local unpin: not global, not dirty */
+ unpin_entry(cache_ptr, file_ptr, idx, FALSE, FALSE, via_unprotect);
+
+ } else {
+
+ /* no locally pinned entry anywhere -- signal the caller */
+ idx = -1;
+ }
+ }
+
+ return(idx);
+
+} /* local_unpin_next_pinned_entry() */
+
+
+/*****************************************************************************
+ * Function: lock_and_unlock_random_entries()
+ *
+ * Purpose: Select a random operation count (at least min_count, at
+ * most max_count), then protect and unprotect that number
+ * of random entries.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/12/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+lock_and_unlock_random_entries(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int min_idx,
+ int max_idx,
+ int min_count,
+ int max_count)
+{
+ /* const char * fcn_name = "lock_and_unlock_random_entries()"; */
+
+ if ( nerrors == 0 ) {
+
+ int n_ops;
+ int op;
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( 0 <= min_count );
+ HDassert( min_count < max_count );
+
+ n_ops = min_count + (HDrand() % (max_count - min_count));
+
+ HDassert( min_count <= n_ops );
+ HDassert( n_ops <= max_count );
+
+ for ( op = 0; op < n_ops; op++ ) {
+
+ lock_and_unlock_random_entry(cache_ptr, file_ptr,
+ min_idx, max_idx);
+ }
+ }
+
+ return;
+
+} /* lock_and_unlock_random_entries() */
+
+
+/*****************************************************************************
+ * Function: lock_and_unlock_random_entry()
+ *
+ * Purpose: Protect and then unprotect a random entry with index in
+ * the data[] array in the closed interval [min_idx, max_idx].
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/4/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+lock_and_unlock_random_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int min_idx,
+ int max_idx)
+{
+ /* const char * fcn_name = "lock_and_unlock_random_entry()"; */
+
+ if ( nerrors == 0 ) {
+
+ int target;
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( 0 <= min_idx );
+ HDassert( min_idx < max_idx );
+ HDassert( max_idx < NUM_DATA_ENTRIES );
+ HDassert( max_idx < virt_num_data_entries );
+
+ /* pick the victim, then protect/unprotect it back to back */
+ target = min_idx + (HDrand() % (max_idx - min_idx));
+
+ HDassert( min_idx <= target );
+ HDassert( target <= max_idx );
+
+ lock_entry(cache_ptr, file_ptr, target);
+ unlock_entry(cache_ptr, file_ptr, target, H5AC2__NO_FLAGS_SET);
+ }
+
+ return;
+
+} /* lock_and_unlock_random_entry() */
+
+
+/*****************************************************************************
+ * Function: lock_entry()
+ *
+ * Purpose: Protect the entry indicated by the index.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/4/06
+ *
+ * Modifications:
+ *
+ * JRM -- 7/11/06
+ * Modified asserts to handle the new local_len field in
+ * datum.
+ *
+ *****************************************************************************/
+
+void
+lock_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx)
+{
+ const char * fcn_name = "lock_entry()";
+ struct datum * entry_ptr;
+ H5C2_cache_entry_t * cache_entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert( ! (entry_ptr->locked) );
+
+ /* protect the entry for write access */
+ cache_entry_ptr = H5AC2_protect(file_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types[0]), entry_ptr->base_addr,
+ entry_ptr->local_len, NULL, H5AC2_WRITE);
+
+ /* the header size may legitimately match either len or
+ * local_len, hence the double size comparison below.
+ */
+ if ( ( cache_entry_ptr != (void *)(&(entry_ptr->header)) ) ||
+ ( entry_ptr->header.type != &(types[0]) ) ||
+ ( ( entry_ptr->len != entry_ptr->header.size ) &&
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: error in H5AC2_protect().\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+
+ /* record the protect in the test bookkeeping */
+ entry_ptr->locked = TRUE;
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ }
+
+ return;
+
+} /* lock_entry() */
+
+
+/*****************************************************************************
+ * Function: mark_pinned_entry_dirty()
+ *
+ * Purpose: Mark dirty the entry indicated by the index,
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/14/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+mark_pinned_entry_dirty(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ hbool_t size_changed,
+ size_t new_size)
+{
+ const char * fcn_name = "mark_pinned_entry_dirty()";
+ herr_t result;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( file_ptr );
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ /* only globally pinned entries may be marked dirty here */
+ HDassert ( entry_ptr->global_pinned );
+ HDassert ( ! (entry_ptr->local_pinned) );
+
+ (entry_ptr->ver)++;
+ entry_ptr->dirty = TRUE;
+
+ /* NOTE(review): this passes entry_ptr itself, whereas
+ * H5AC2_set()/H5AC2_unprotect() pass &(entry_ptr->header) --
+ * presumably the header is the first member of struct datum;
+ * verify against the datum declaration.
+ */
+ result = H5AC2_mark_pinned_entry_dirty(file_ptr,
+ (void *)entry_ptr,
+ size_changed,
+ new_size);
+
+ if ( result < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: error in H5AC2_mark_pinned_entry_dirty().\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ else
+ {
+ /* success -- count the new global dirty pin */
+ global_dirty_pins++;
+ }
+ }
+
+ return;
+
+} /* mark_pinned_entry_dirty() */
+
+
+/*****************************************************************************
+ * Function: mark_pinned_or_protected_entry_dirty()
+ *
+ * Purpose: Use the H5AC2_mark_pinned_or_protected_entry_dirty() call to
+ * mark dirty the entry indicated by the index,
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 5/18/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+mark_pinned_or_protected_entry_dirty(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx)
+{
+ const char * fcn_name = "mark_pinned_or_protected_entry_dirty()";
+ herr_t result;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( file_ptr );
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ /* the entry must be either protected (locked) or globally pinned */
+ HDassert ( entry_ptr->locked || entry_ptr->global_pinned );
+
+ (entry_ptr->ver)++;
+ entry_ptr->dirty = TRUE;
+
+ result = H5AC2_mark_pinned_or_protected_entry_dirty(file_ptr,
+ (void *)entry_ptr);
+
+ if ( result < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: error in %s.\n",
+ world_mpi_rank, fcn_name,
+ "H5AC2_mark_pinned_or_protected_entry_dirty()");
+ }
+ }
+ else if ( ! ( entry_ptr->locked ) )
+ {
+ /* only count a dirty pin when the entry was pinned (not
+ * merely protected) -- protected entries are dirtied via
+ * unprotect instead.
+ */
+ global_dirty_pins++;
+ }
+ }
+
+ return;
+
+} /* mark_pinned_or_protected_entry_dirty() */
+
+
+/*****************************************************************************
+ * Function: pin_entry()
+ *
+ * Purpose: Pin the entry indicated by the index.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/11/06
+ *
+ * Modifications:
+ *
+ *****************************************************************************/
+
+void
+pin_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ hbool_t global,
+ hbool_t dirty)
+{
+ /* const char * fcn_name = "pin_entry()"; */
+ unsigned int flags = H5AC2__PIN_ENTRY_FLAG;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert ( ! (entry_ptr->global_pinned) );
+ HDassert ( ! (entry_ptr->local_pinned) );
+ /* dirty pins must be global pins (see insert_entry() notes) */
+ HDassert ( ! ( dirty && ( ! global ) ) );
+
+ /* pin by protecting the entry, then unprotecting it with the
+ * pin flag (and the dirtied flag, if requested) set.
+ */
+ lock_entry(cache_ptr, file_ptr, idx);
+
+ if ( dirty ) {
+
+ flags |= H5AC2__DIRTIED_FLAG;
+ }
+
+ unlock_entry(cache_ptr, file_ptr, idx, flags);
+
+ HDassert( (entry_ptr->header).is_pinned );
+ HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
+
+ /* record the pin in the test bookkeeping */
+ if ( global ) {
+
+ entry_ptr->global_pinned = TRUE;
+
+ global_pins++;
+
+ } else {
+
+ entry_ptr->local_pinned = TRUE;
+
+ local_pins++;
+
+ }
+ }
+
+ return;
+
+} /* pin_entry() */
+
+
+/*****************************************************************************
+ * Function: pin_protected_entry()
+ *
+ * Purpose: Pin the currently protected (locked) entry indicated by
+ * idx via H5AC2_pin_protected_entry(), and update the test
+ * bookkeeping to record the new global or local pin.
+ *
+ * (Header rewritten -- the old text described insert_entry().)
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 01/04/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+void
+pin_protected_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ hbool_t global)
+{
+ const char * fcn_name = "pin_protected_entry()";
+ herr_t result;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ /* sanity checks moved inside the nerrors check so that the
+ * function really is a no-op once an error has been detected,
+ * matching the documented contract and the other test
+ * functions in this file.
+ */
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert( entry_ptr->locked );
+
+ result = H5AC2_pin_protected_entry(file_ptr, (void *)entry_ptr);
+
+ /* the header size may legitimately match either len or
+ * local_len, hence the double size comparison below.
+ */
+ if ( ( result < 0 ) ||
+ ( entry_ptr->header.type != &(types[0]) ) ||
+ ( ( entry_ptr->len != entry_ptr->header.size ) &&
+ ( entry_ptr->local_len != entry_ptr->header.size ) )||
+ ( entry_ptr->base_addr != entry_ptr->header.addr ) ||
+ ( ! ( (entry_ptr->header).is_pinned ) ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: Error in H5AC2_pin_protected entry().\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* record the pin in the test bookkeeping */
+ if ( global ) {
+
+ entry_ptr->global_pinned = TRUE;
+
+ global_pins++;
+
+ } else {
+
+ entry_ptr->local_pinned = TRUE;
+
+ local_pins++;
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ }
+
+ return;
+
+} /* pin_protected_entry() */
+
+
+/*****************************************************************************
+ * Function: rename_entry()
+ *
+ * Purpose: Rename the entry indicated old_idx to the entry indicated
+ * by new_idx. Touch up the data array so that flush will
+ * not choke.
+ *
+ * Do nothing if nerrors isn't zero, or if old_idx equals
+ * new_idx.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/10/06
+ *
+ * Modifications:
+ *
+ * 7/11/06 -- JRM
+ * Added support for the phony_len field in datum.
+ *
+ *****************************************************************************/
+
+void
+rename_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t old_idx,
+ int32_t new_idx)
+{
+ const char * fcn_name = "rename_entry()";
+ herr_t result;
+ int tmp;
+ size_t tmp_len;
+ haddr_t old_addr = HADDR_UNDEF;
+ haddr_t new_addr = HADDR_UNDEF;
+ struct datum * old_entry_ptr;
+ struct datum * new_entry_ptr;
+
+ if ( ( nerrors == 0 ) && ( old_idx != new_idx ) ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= old_idx ) && ( old_idx < NUM_DATA_ENTRIES ) );
+ HDassert( old_idx < virt_num_data_entries );
+ HDassert( ( 0 <= new_idx ) && ( new_idx < NUM_DATA_ENTRIES ) );
+ HDassert( new_idx < virt_num_data_entries );
+
+ old_entry_ptr = &(data[old_idx]);
+ new_entry_ptr = &(data[new_idx]);
+
+ HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert( !(old_entry_ptr->header.is_protected) );
+ HDassert( !(old_entry_ptr->locked) );
+ HDassert( old_entry_ptr->len == new_entry_ptr->len );
+
+ old_addr = old_entry_ptr->base_addr;
+ new_addr = new_entry_ptr->base_addr;
+
+ /* move the cache entry from old_addr to new_addr */
+ result = H5AC2_rename(file_ptr, &(types[0]), old_addr, new_addr);
+
+ if ( ( result < 0 ) || ( old_entry_ptr->header.addr != new_addr ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5AC2_rename() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ } else {
+
+ /* a rename always dirties the entry */
+ HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert( old_entry_ptr->header.is_dirty );
+ old_entry_ptr->dirty = TRUE;
+
+ /* touch up versions, base_addrs, and data_index */
+
+ /* the surviving entry's version must dominate both originals */
+ if ( old_entry_ptr->ver < new_entry_ptr->ver ) {
+
+ old_entry_ptr->ver = new_entry_ptr->ver;
+
+ } else {
+
+ (old_entry_ptr->ver)++;
+
+ }
+
+ /* swap addresses, index-table slots, and (if they differ)
+ * local lengths so the data[] array stays consistent with
+ * the cache's view of the file.
+ */
+ old_entry_ptr->base_addr = new_addr;
+ new_entry_ptr->base_addr = old_addr;
+
+ data_index[old_entry_ptr->index] = new_idx;
+ data_index[new_entry_ptr->index] = old_idx;
+
+ tmp = old_entry_ptr->index;
+ old_entry_ptr->index = new_entry_ptr->index;
+ new_entry_ptr->index = tmp;
+
+ if ( old_entry_ptr->local_len != new_entry_ptr->local_len ) {
+
+ tmp_len = old_entry_ptr->local_len;
+ old_entry_ptr->local_len = new_entry_ptr->local_len;
+ new_entry_ptr->local_len = tmp_len;
+ }
+ }
+ }
+
+ return;
+
+} /* rename_entry() */
+
+
+/*****************************************************************************
+ * Function: resize_entry()
+ *
+ * Purpose: Resize the pinned entry indicated by idx to the new_size.
+ * Note that new_size must be greater than 0, and must be
+ * less than or equal to the original size of the entry.
+ *
+ * Do nothing if nerrors isn't zero.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/11/06
+ *
+ * Modifications:
+ *
+ * None
+ *
+ *****************************************************************************/
+
+void
+resize_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ size_t new_size)
+{
+ const char * fcn_name = "resize_entry()";
+ herr_t result;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ /* only a globally pinned, unprotected entry may be resized */
+ HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert( !(entry_ptr->header.is_protected) );
+ HDassert( !(entry_ptr->locked) );
+ HDassert( ( entry_ptr->global_pinned ) &&
+ ( ! entry_ptr->local_pinned ) );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+ HDassert( new_size > 0 );
+ HDassert( new_size <= entry_ptr->len );
+
+ result = H5AC2_resize_pinned_entry(file_ptr, (void *)entry_ptr,
+ new_size);
+
+ if ( result < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ /* was "H5AC2_rename() failed" -- fixed to name the
+ * call that actually failed.
+ */
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_resize_pinned_entry() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ } else {
+
+ /* a resize dirties the entry and updates its size */
+ HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert( entry_ptr->header.is_dirty );
+ HDassert( entry_ptr->header.size == new_size );
+
+ entry_ptr->dirty = TRUE;
+ entry_ptr->local_len = new_size;
+
+ /* touch up version. */
+
+ (entry_ptr->ver)++;
+ }
+ }
+
+ return;
+
+} /* resize_entry() */
+
+
+/*****************************************************************************
+ *
+ * Function: setup_cache_for_test()
+ *
+ * Purpose: Setup the parallel cache for a test, and return the file id
+ * and a pointer to the cache's internal data structures.
+ *
+ * To do this, we must create a file, flush it (so that we
+ * don't have to worry about entries in the metadata cache),
+ * look up the address of the metadata cache, and then instruct
+ * the cache to omit sanity checks on dxpl IDs.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/4/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+setup_cache_for_test(hid_t * fid_ptr,
+ H5F_t ** file_ptr_ptr,
+ H5C2_t ** cache_ptr_ptr)
+{
+ const char * fcn_name = "setup_cache_for_test()";
+ hbool_t success = FALSE; /* will set to TRUE if appropriate. */
+ hbool_t enable_rpt_fcn = FALSE;
+ hid_t fid = -1;
+ H5AC2_cache_config_t config;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+ haddr_t actual_base_addr;
+
+ HDassert ( fid_ptr != NULL );
+ HDassert ( file_ptr_ptr != NULL );
+ HDassert ( cache_ptr_ptr != NULL );
+
+ /* step 1: create and flush the test file */
+ fid = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+ if ( fid < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+ file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE);
+ }
+
+ /* step 2: dig the cache pointer out of the file's shared struct */
+ if ( file_ptr == NULL ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+ cache_ptr = file_ptr->shared->cache2;
+ }
+
+ if ( cache_ptr == NULL ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else if ( cache_ptr->magic != H5C2__H5C2_T_MAGIC ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+ /* all good -- hand the handles back and reset cache stats */
+ *fid_ptr = fid;
+ *file_ptr_ptr = file_ptr;
+ *cache_ptr_ptr = cache_ptr;
+#if 0 /* delete this eventually */
+ H5C2_set_skip_flags(cache_ptr, TRUE, TRUE);
+#endif /* JRM */
+ H5C2_stats__reset(cache_ptr);
+ success = TRUE;
+ }
+
+ /* step 3 (optional): turn on the auto-resize report function */
+ if ( ( success ) && ( enable_rpt_fcn ) ) {
+
+ config.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+ if ( H5AC2_get_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_get_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+
+ } else {
+
+ config.rpt_fcn_enabled = TRUE;
+
+ if ( H5AC2_set_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_set_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+ } else {
+
+ HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+#if DO_SYNC_AFTER_WRITE
+
+ if ( success ) {
+
+ if ( H5AC2_set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: H5C2_set_write_done_callback failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+#endif /* DO_SYNC_AFTER_WRITE */
+
+ if ( success ) { /* allocate space for test entries */
+
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT,
+ (hsize_t)(max_addr + BASE_ADDR));
+
+ if ( actual_base_addr == HADDR_UNDEF ) {
+
+ success = FALSE;
+ nerrors++;
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ } else if ( actual_base_addr > BASE_ADDR ) {
+
+ /* If this happens, must increase BASE_ADDR so that the
+ * actual_base_addr is <= BASE_ADDR. This should only happen
+ * if the size of the superblock is increased.
+ */
+ success = FALSE;
+ nerrors++;
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ return(success);
+
+} /* setup_cache_for_test() */
+
+
+/*****************************************************************************
+ *
+ * Function: setup_noblock_dxpl_id()
+ *
+ * Purpose: Setup the noblock_dxpl_id global. Increment nerrors if
+ * errors are detected. Do nothing if nerrors is non-zero
+ * on entry.
+ *
+ * Return: void.
+ *
+ * Programmer: JRM -- 1/5/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+/* So far we haven't needed this, but that may change.
+ * Keep it around for now
+ */
+#if 0
+/* NOTE(review): this disabled function ends with "return(success);"
+ * although it is declared void and no variable named success is
+ * declared -- it will not compile as-is if the #if 0 is ever removed.
+ */
+void
+setup_noblock_dxpl_id(void)
+{
+ const char * fcn_name = "setup_noblock_dxpl_id()";
+ H5P_genclass_t *xfer_pclass; /* Dataset transfer property list
+ * class object
+ */
+ H5P_genplist_t *xfer_plist; /* Dataset transfer property list object */
+ unsigned block_before_meta_write; /* "block before meta write"
+ * property value
+ */
+ unsigned library_internal = 1; /* "library internal" property value */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
+
+ /* Sanity check */
+ HDassert(H5P_CLS_DATASET_XFER_g!=(-1));
+
+ /* Get the dataset transfer property list class object */
+ if ( ( nerrors == 0 ) &&
+ ( NULL == (xfer_pclass = H5I_object(H5P_CLS_DATASET_XFER_g)) ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: can't get property list class.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* Get an ID for the non-blocking, collective H5AC2 dxpl */
+ if ( ( nerrors == 0 ) &&
+ ( (noblock_dxpl_id = H5P_create_id(xfer_pclass)) < 0 ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: can't register property list.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* Get the property list object */
+ if ( ( nerrors == 0 ) &&
+ ( NULL == (xfer_plist = H5I_object(H5AC2_noblock_dxpl_id)) ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: can't get new property list object.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* Insert 'block before metadata write' property */
+ block_before_meta_write=0;
+ if ( ( nerrors == 0 ) &&
+ ( H5P_insert(xfer_plist, H5AC2_BLOCK_BEFORE_META_WRITE_NAME,
+ H5AC2_BLOCK_BEFORE_META_WRITE_SIZE,
+ &block_before_meta_write,
+ NULL, NULL, NULL, NULL, NULL, NULL) < 0 ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: can't insert metadata cache dxpl property 1.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* Insert 'library internal' property */
+ if ( ( nerrors == 0 ) &&
+ ( H5P_insert(xfer_plist, H5AC2_LIBRARY_INTERNAL_NAME,
+ H5AC2_LIBRARY_INTERNAL_SIZE, &library_internal,
+ NULL, NULL, NULL, NULL, NULL, NULL ) < 0 ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: can't insert metadata cache dxpl property 2.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* Set the transfer mode */
+ xfer_mode = H5FD_MPIO_COLLECTIVE;
+ if ( ( nerrors == 0 ) &&
+ ( H5P_set(xfer_plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0 ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: unable to set value.\n", world_mpi_rank,
+ fcn_name);
+ }
+ }
+
+ return(success);
+
+} /* setup_noblock_dxpl_id() */
+#endif
+
+
+/*****************************************************************************
+ *
+ * Function: setup_rand()
+ *
+ * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
+ * seed to stdout, and then pass it to srand().
+ *
+ * Increment nerrors if any errors are detected.
+ *
+ * Return: void.
+ *
+ * Programmer: JRM -- 1/12/06
+ *
+ * Modifications:
+ *
+ * JRM -- 5/9/06
+ * Modified function to facilitate setting predefined seeds.
+ *
+ *****************************************************************************/
+
+void
+setup_rand(void)
+{
+ const char * fcn_name = "setup_rand()";
+ hbool_t use_predefined_seeds = FALSE;
+ int num_predefined_seeds = 3;
+ unsigned predefined_seeds[3] = {18669, 89925, 12577};
+ unsigned seed;
+ struct timeval tv;
+ struct timezone tz;
+
+ if ( ( use_predefined_seeds ) &&
+ ( world_mpi_size == num_predefined_seeds ) ) {
+
+ /* reproducible run -- each rank uses its canned seed */
+ HDassert( world_mpi_rank >= 0 );
+ HDassert( world_mpi_rank < world_mpi_size );
+
+ seed = predefined_seeds[world_mpi_rank];
+ HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
+ world_mpi_rank, fcn_name, seed);
+ fflush(stdout);
+ HDsrand(seed);
+
+ } else if ( HDgettimeofday(&tv, &tz) != 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ } else {
+
+ /* seed from the sub-second component of the current time */
+ seed = (unsigned)tv.tv_usec;
+ HDfprintf(stdout, "%d:%s: seed = %d.\n",
+ world_mpi_rank, fcn_name, seed);
+ fflush(stdout);
+ HDsrand(seed);
+ }
+
+ return;
+
+} /* setup_rand() */
+
+
+/*****************************************************************************
+ *
+ * Function: take_down_cache()
+ *
+ * Purpose: Take down the parallel cache after a test.
+ *
+ * To do this, we must close the file, and delete if if
+ * possible.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/4/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+hbool_t
+take_down_cache(hid_t fid,
+ H5C2_t * cache_ptr)
+{
+ const char * fcn_name = "take_down_cache()";
+ hbool_t show_progress = FALSE;
+ hbool_t success = FALSE; /* will set to TRUE if appropriate. */
+ int mile_stone = 1;
+
+ if ( show_progress ) { /* 1 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ /* close the file and delete it */
+ if ( H5Fclose(fid) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+
+ } else if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ /* only the server rank removes the file from disk */
+ if ( HDremove(filenames[0]) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: HDremove() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+
+ success = TRUE;
+ }
+ } else {
+
+ /* non-server ranks succeed once the close has succeeded */
+ success = TRUE;
+ }
+
+ if ( show_progress ) { /* 2 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ return(success);
+
+} /* take_down_cache() */
+
+
+/*****************************************************************************
+ * Function: unlock_entry()
+ *
+ * Purpose: Unprotect the entry indicated by the index.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/4/06
+ *
+ * Modifications:
+ *
+ * 7/11/06
+ * Updated for the new local_len field in datum.
+ *
+ *****************************************************************************/
+
+void
+unlock_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ unsigned int flags)
+{
+ const char * fcn_name = "unlock_entry()";
+ hbool_t dirtied; /* was herr_t -- this holds a boolean flag test,
+ * not a status code.
+ */
+ herr_t result;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert( entry_ptr->locked );
+
+ dirtied = ((flags & H5AC2__DIRTIED_FLAG) == H5AC2__DIRTIED_FLAG );
+
+ if ( dirtied ) {
+
+ /* keep the test-side version/dirty state in sync with what
+ * the cache will see.
+ */
+ (entry_ptr->ver)++;
+ entry_ptr->dirty = TRUE;
+ }
+
+ result = H5AC2_unprotect(file_ptr, H5P_DATASET_XFER_DEFAULT, &(types[0]),
+ entry_ptr->base_addr, entry_ptr->local_len,
+ (void *)(&(entry_ptr->header)), flags);
+
+ /* the header size may legitimately match either len or
+ * local_len, hence the double size comparison below.
+ */
+ if ( ( result < 0 ) ||
+ ( entry_ptr->header.type != &(types[0]) ) ||
+ ( ( entry_ptr->len != entry_ptr->header.size ) &&
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: error in H5C2_unprotect().\n",
+ world_mpi_rank, fcn_name);
+ }
+ } else {
+
+ entry_ptr->locked = FALSE;
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+
+ /* NOTE(review): this mixes H5AC2__DIRTIED_FLAG with
+ * H5C2__DELETED_FLAG -- presumably the two families share
+ * bit values; confirm against H5AC2public.h/H5C2private.h.
+ */
+ if ( ( flags & H5AC2__DIRTIED_FLAG ) != 0
+ && ( (flags & H5C2__DELETED_FLAG) == 0 ) ) {
+
+ HDassert( entry_ptr->header.is_dirty );
+ HDassert( entry_ptr->dirty );
+ }
+ }
+
+ return;
+
+} /* unlock_entry() */
+
+
+/*****************************************************************************
+ * Function: unpin_entry()
+ *
+ * Purpose: Unpin the entry indicated by the index.
+ *
+ * Do nothing if nerrors is non-zero on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/12/06
+ *
+ * Modifications:
+ *
+ * JRM -- 8/15/06
+ * Added assertion that entry is pinned on entry.
+ *
+ *****************************************************************************/
+
+void
+unpin_entry(H5C2_t * cache_ptr,
+ H5F_t * file_ptr,
+ int32_t idx,
+ hbool_t global,
+ hbool_t dirty,
+ hbool_t via_unprotect)
+{
+ const char * fcn_name = "unpin_entry()";
+ herr_t result;
+ unsigned int flags = H5AC2__UNPIN_ENTRY_FLAG;
+ struct datum * entry_ptr;
+
+ if ( nerrors == 0 ) {
+
+ HDassert( cache_ptr );
+ HDassert( file_ptr );
+ HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
+ HDassert( idx < virt_num_data_entries );
+
+ entry_ptr = &(data[idx]);
+
+ HDassert( (entry_ptr->header).is_pinned );
+ HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
+ HDassert ( ( global && entry_ptr->global_pinned ) ||
+ ( ! global && entry_ptr->local_pinned ) );
+ HDassert ( ! ( dirty && ( ! global ) ) );
+
+ if ( via_unprotect ) {
+
+ lock_entry(cache_ptr, file_ptr, idx);
+
+ if ( dirty ) {
+
+ flags |= H5AC2__DIRTIED_FLAG;
+ }
+
+ unlock_entry(cache_ptr, file_ptr, idx, flags);
+
+ } else {
+
+ if ( dirty ) {
+
+ mark_pinned_entry_dirty(cache_ptr, file_ptr, idx, FALSE,
+ (size_t)0);
+
+ }
+
+ result = H5AC2_unpin_entry(file_ptr, (void *)entry_ptr);
+
+ if ( result < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: error in H5AC2_unpin_entry().\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ HDassert( ! ((entry_ptr->header).is_pinned) );
+
+ if ( global ) {
+
+ entry_ptr->global_pinned = FALSE;
+
+ } else {
+
+ entry_ptr->local_pinned = FALSE;
+
+ }
+ }
+
+ return;
+
+} /* unpin_entry() */
+
+
+/*****************************************************************************/
+/****************************** test functions *******************************/
+/*****************************************************************************/
+
+/*****************************************************************************
+ *
+ * Function: server_smoke_check()
+ *
+ * Purpose: Quick smoke check for the server process.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 12/21/05
+ *
+ * Modifications:
+ *
+ * JRM -- 5/9/06
+ * Added code supporting the write request ack message. This
+ * message was added to eliminate one possible cause of a
+ * bug spotted on cobalt. If this doesn't fix the problem,
+ * it will narrow things down a bit.
+ *
+ * JRM -- 5/10/06
+ * Added call to do_sync(). This is part of an attempt to
+ * optimize out the slowdown caused by the addition of the
+ * write request ack message.
+ *
+ *****************************************************************************/
+
hbool_t
server_smoke_check(void)
{
    const char * fcn_name = "server_smoke_check()";
    hbool_t success = TRUE;
    int max_nerrors;
    struct mssg_t mssg;

    if ( world_mpi_rank == 0 ) {

        TESTING("server smoke check");
    }

    nerrors = 0;
    init_data();
    reset_stats();

    /* The designated server rank runs server_main() and services
     * requests; every other rank plays client and exercises the
     * write -> (ack) -> read -> done message protocol once.
     */
    if ( world_mpi_rank == world_server_mpi_rank ) {

        if ( ! server_main() ) {

            /* some error occured in the server -- report failure */
            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: server_main() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }
    else /* run the clients */
    {
        /* compose the write message -- each client writes the datum
         * whose index equals its own world rank.
         */
        mssg.req       = WRITE_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = data[world_mpi_rank].base_addr;
        mssg.len       = data[world_mpi_rank].len;
        mssg.ver       = ++(data[world_mpi_rank].ver);
        mssg.magic     = MSSG_MAGIC;

        if ( ! ( success = send_mssg(&mssg, FALSE) ) ) {

            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
                          world_mpi_rank, fcn_name);
            }
        }

#if DO_WRITE_REQ_ACK

        /* try to receive the write ack from the server */
        if ( success ) {

            success = recv_mssg(&mssg, WRITE_REQ_ACK_CODE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* verify that we received the expected ack message -- every
         * field must echo the write request we just sent.
         */
        if ( success ) {

            if ( ( mssg.req != WRITE_REQ_ACK_CODE ) ||
                 ( mssg.src != world_server_mpi_rank ) ||
                 ( mssg.dest != world_mpi_rank ) ||
                 ( mssg.base_addr != data[world_mpi_rank].base_addr ) ||
                 ( mssg.len != data[world_mpi_rank].len ) ||
                 ( mssg.ver != data[world_mpi_rank].ver ) ||
                 ( mssg.magic != MSSG_MAGIC ) ) {

                success = FALSE;
                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

#endif /* DO_WRITE_REQ_ACK */

        /* barrier-like sync so all writes land before any reads */
        do_sync();

        /* compose the read message */
        mssg.req       = READ_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = data[world_mpi_rank].base_addr;
        mssg.len       = data[world_mpi_rank].len;
        mssg.ver       = 0; /* bogus -- should be corrected by server */
        mssg.magic     = MSSG_MAGIC;

        if ( success ) {

            success = send_mssg(&mssg, FALSE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* try to receive the reply from the server */
        if ( success ) {

            success = recv_mssg(&mssg, READ_REQ_REPLY_CODE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* verify that we got the expected result -- in particular the
         * server must have replaced the bogus version number with the
         * one recorded by our earlier write.
         */
        if ( success ) {

            if ( ( mssg.req != READ_REQ_REPLY_CODE ) ||
                 ( mssg.src != world_server_mpi_rank ) ||
                 ( mssg.dest != world_mpi_rank ) ||
                 ( mssg.base_addr != data[world_mpi_rank].base_addr ) ||
                 ( mssg.len != data[world_mpi_rank].len ) ||
                 ( mssg.ver != data[world_mpi_rank].ver ) ||
                 ( mssg.magic != MSSG_MAGIC ) ) {

                success = FALSE;
                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* compose the done message -- tells the server this client is
         * finished; server_main() exits once all clients report done.
         */
        mssg.req       = DONE_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = 0; /* not used */
        mssg.len       = 0; /* not used */
        mssg.ver       = 0; /* not used */
        mssg.magic     = MSSG_MAGIC;

        if ( success ) {

            success = send_mssg(&mssg, FALSE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }
    }

    /* reduce error counts across ranks; rank 0 reports pass/fail */
    max_nerrors = get_max_nerrors();

    if ( world_mpi_rank == 0 ) {

        if ( max_nerrors == 0 ) {

            PASSED();

        } else {

            failures++;
            H5_FAILED();
        }
    }

    success = ( ( success ) && ( max_nerrors == 0 ) );

    return(success);

} /* server_smoke_check() */
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_1()
+ *
+ * Purpose: First smoke check for the parallel cache.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/4/06
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
hbool_t
smoke_check_1(void)
{
    const char * fcn_name = "smoke_check_1()";
    hbool_t success = TRUE;
    hbool_t show_progress = FALSE;
    int i;
    int max_nerrors;
    int mile_stone = 1;
    hid_t fid = -1;
    H5F_t * file_ptr = NULL;
    H5C2_t * cache_ptr = NULL;
    struct mssg_t mssg;

    if ( world_mpi_rank == 0 ) {

        TESTING("smoke check #1");
    }

    if ( show_progress ) { /* 1 */
        HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                  world_mpi_rank, fcn_name, mile_stone++, (int)success);
        fflush(stdout);
    }

    nerrors = 0;
    init_data();
    reset_stats();

    /* server rank services requests; all other ranks run the actual
     * cache exercise below.
     */
    if ( world_mpi_rank == world_server_mpi_rank ) {

        if ( show_progress ) { /* 2s */
            HDfprintf(stdout, "%d:%s - %0ds -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        if ( ! server_main() ) {

            /* some error occured in the server -- report failure */
            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: server_main() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }

        if ( show_progress ) { /* 3s */
            HDfprintf(stdout, "%d:%s - %0ds -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }
    }
    else /* run the clients */
    {

        if ( show_progress ) { /* 2 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {

            nerrors++;
            fid = -1;
            cache_ptr = NULL;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }

        if ( show_progress ) { /* 3 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* insert the first half of the entries into the cache */
        for ( i = 0; i < (virt_num_data_entries / 2); i++ )
        {
            insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
        }

        if ( show_progress ) { /* 4 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* protect/unprotect each inserted entry in reverse order */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
        }

        if ( show_progress ) { /* 5 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* rename the first half of the entries... */
        for ( i = 0; i < (virt_num_data_entries / 2); i++ )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
        }

        if ( show_progress ) { /* 6 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* ...and then rename them back. */
        /* NOTE(review): the same (i, i + n/2) pair is passed again --
         * presumably rename_entry() toggles an entry between its base and
         * alternate addresses, so a second call undoes the first; confirm
         * against rename_entry()'s definition.
         */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
        }

        if ( show_progress ) { /* 7 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* flush and close the file, removing it on the server rank */
        if ( fid >= 0 ) {

            if ( ! take_down_cache(fid, cache_ptr) ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        if ( show_progress ) { /* 8 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* verify that all instance of datum are back where the started
         * and are clean.
         */

        for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
        {
            HDassert( data_index[i] == i );
            HDassert( ! (data[i].dirty) );
        }

        if ( show_progress ) { /* 9 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }

        /* compose the done message -- lets server_main() terminate */
        mssg.req       = DONE_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = 0; /* not used */
        mssg.len       = 0; /* not used */
        mssg.ver       = 0; /* not used */
        mssg.magic     = MSSG_MAGIC;

        if ( success ) {

            success = send_mssg(&mssg, FALSE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        if ( show_progress ) { /* 10 */
            HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
                      world_mpi_rank, fcn_name, mile_stone++, (int)success);
            fflush(stdout);
        }
    }

    /* reduce error counts across ranks; rank 0 reports pass/fail */
    max_nerrors = get_max_nerrors();

    if ( world_mpi_rank == 0 ) {

        if ( max_nerrors == 0 ) {

            PASSED();

        } else {

            failures++;
            H5_FAILED();
        }
    }

    success = ( ( success ) && ( max_nerrors == 0 ) );

    return(success);

} /* smoke_check_1() */
+
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_2()
+ *
+ * Purpose: Second smoke check for the parallel cache.
+ *
+ * Introduce random reads, but keep all processes with roughly
+ * the same work load.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/12/06
+ *
+ * Modifications:
+ *
+ * JRM -- 4/13/06
+ * Added pinned entry tests.
+ *
+ * JRM -- 4/28/06
+ * Modified test to rename pinned entries.
+ *
+ *****************************************************************************/
+
hbool_t
smoke_check_2(void)
{
    const char * fcn_name = "smoke_check_2()";
    hbool_t success = TRUE;
    int i;
    int max_nerrors;
    hid_t fid = -1;
    H5F_t * file_ptr = NULL;
    H5C2_t * cache_ptr = NULL;
    struct mssg_t mssg;

    if ( world_mpi_rank == 0 ) {

        TESTING("smoke check #2");
    }

    nerrors = 0;
    init_data();
    reset_stats();

    /* server rank services requests; all other ranks run the cache
     * exercise: inserts with random reads, global/local pins, dirty
     * unlocks, and rename round trips.
     */
    if ( world_mpi_rank == world_server_mpi_rank ) {

        if ( ! server_main() ) {

            /* some error occured in the server -- report failure */
            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: server_main() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }
    else /* run the clients */
    {
        if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {

            nerrors++;
            fid = -1;
            cache_ptr = NULL;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }

        /* insert the first half of the entries, with a trailing window
         * of random protect/unprotect traffic once past index 100.
         */
        for ( i = 0; i < (virt_num_data_entries / 2); i++ )
        {
            insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);

            if ( i > 100 ) {

                lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                               (i - 100), i, 0, 10);
            }
        }

        /* globally pin every 61st entry */
        for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
        {
            /* Make sure we don't step on any locally pinned entries */
            if ( data[i].local_pinned ) {
                unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE);
            }

            pin_entry(cache_ptr, file_ptr, i, TRUE, FALSE);
        }

        /* clean lock/unlock sweep over odd indices, mixed with random
         * reads and random local pin/unpin churn.
         */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           (virt_num_data_entries / 20),
                                           0, 100);
            local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0,
                                               (virt_num_data_entries / 4),
                                               0, 3);
        }

        /* dirty lock/unlock sweep over even indices */
        for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           (virt_num_data_entries / 10),
                                           0, 100);
        }

        /* we can't rename pinned entries, so release any local pins now. */
        local_unpin_all_entries(cache_ptr, file_ptr, FALSE);

        /* rename the first half of the entries... */
        for ( i = 0; i < (virt_num_data_entries / 2); i++ )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           ((virt_num_data_entries / 50) - 1),
                                           0, 100);
        }

        /* ...and then rename them back. */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           (virt_num_data_entries / 100),
                                           0, 100);
        }

        /* release the global pins set above, alternating dirty /
         * via_unprotect combinations based on the low bits of i.
         */
        for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
        {
            hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
            hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );

            unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty, via_unprotect);
        }

        if ( fid >= 0 ) {

            if ( ! take_down_cache(fid, cache_ptr) ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* verify that all instance of datum are back where the started
         * and are clean.
         */

        for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
        {
            HDassert( data_index[i] == i );
            HDassert( ! (data[i].dirty) );
        }

        /* compose the done message -- lets server_main() terminate */
        mssg.req       = DONE_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = 0; /* not used */
        mssg.len       = 0; /* not used */
        mssg.ver       = 0; /* not used */
        mssg.magic     = MSSG_MAGIC;

        if ( success ) {

            success = send_mssg(&mssg, FALSE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }
    }

    /* reduce error counts across ranks; rank 0 reports pass/fail */
    max_nerrors = get_max_nerrors();

    if ( world_mpi_rank == 0 ) {

        if ( max_nerrors == 0 ) {

            PASSED();

        } else {

            failures++;
            H5_FAILED();
        }
    }

    success = ( ( success ) && ( max_nerrors == 0 ) );

    return(success);

} /* smoke_check_2() */
+
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_3()
+ *
+ * Purpose: Third smoke check for the parallel cache.
+ *
+ * Use random reads to vary the loads on the diffferent
+ * processors. Also force different cache size adjustments.
+ *
+ * In this test, load process 0 heavily, and the other
+ * processes lightly.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/13/06
+ *
+ * Modifications:
+ *
+ * Added code intended to ensure correct operation with large
+ * numbers of processors.
+ * JRM - 1/31/06
+ *
+ * Added pinned entry tests. JRM - 4/14/06
+ *
+ *****************************************************************************/
+
hbool_t
smoke_check_3(void)
{
    const char * fcn_name = "smoke_check_3()";
    hbool_t success = TRUE;
    int i;
    int max_nerrors;
    int min_count;
    int max_count;
    int min_idx;
    int max_idx;
    hid_t fid = -1;
    H5F_t * file_ptr = NULL;
    H5C2_t * cache_ptr = NULL;
    struct mssg_t mssg;

    if ( world_mpi_rank == 0 ) {

        TESTING("smoke check #3");
    }

    nerrors = 0;
    init_data();
    reset_stats();

    /* server rank services requests; all other ranks run the cache
     * exercise.  Random-read counts shrink with file_mpi_rank, so
     * process 0 is loaded heavily and higher ranks lightly.
     */
    if ( world_mpi_rank == world_server_mpi_rank ) {

        if ( ! server_main() ) {

            /* some error occured in the server -- report failure */
            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: server_main() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }
    }
    else /* run the clients */
    {
        if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {

            nerrors++;
            fid = -1;
            cache_ptr = NULL;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }

        /* rank-dependent random-read load: inverse-quadratic in rank */
        min_count = 100 / ((file_mpi_rank + 1) * (file_mpi_rank + 1));
        max_count = min_count + 50;

        for ( i = 0; i < (virt_num_data_entries / 4); i++ )
        {
            insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);

            if ( i > 100 ) {

                lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                               (i - 100), i,
                                               min_count, max_count);
            }
        }


        min_count = 100 / ((file_mpi_rank + 2) * (file_mpi_rank + 2));
        max_count = min_count + 50;

        /* second quarter of inserts, globally pinning every 59th entry
         * (dirty on even indices) and churning local pins as we go.
         */
        for ( i = (virt_num_data_entries / 4);
              i < (virt_num_data_entries / 2);
              i++ )
        {

            insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);

            if ( i % 59 == 0 ) {

                hbool_t dirty = ( (i % 2) == 0);

                /* don't step on an existing local pin */
                if ( data[i].local_pinned ) {
                    unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE);
                }

                pin_entry(cache_ptr, file_ptr, i, TRUE, dirty);

                HDassert( !dirty || data[i].header.is_dirty );
                HDassert( data[i].header.is_pinned );
                HDassert( data[i].global_pinned );
                HDassert( ! data[i].local_pinned );
            }

            if ( i > 100 ) {

                lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                               (i - 100), i,
                                               min_count, max_count);
            }

            local_pin_and_unpin_random_entries(cache_ptr, file_ptr,
                                               0, virt_num_data_entries / 4,
                                               0, (file_mpi_rank + 2));

        }


        /* flush the file to be sure that we have no problems flushing
         * pinned entries
         */
        if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
            nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
                          world_mpi_rank, fcn_name);
            }
        }


        min_idx = 0;
        max_idx = ((virt_num_data_entries / 10) /
                   ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1;
        /* guard against a degenerate random-read index window */
        if ( max_idx <= min_idx ) {

            max_idx = min_idx + 10;
        }

        /* reverse sweep: unpin the every-59th global pins, and do clean
         * lock/unlocks plus random pin/read traffic on even indices.
         */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
        {
            if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {

                hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
                hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );

                HDassert( data[i].global_pinned );
                HDassert( ! data[i].local_pinned );

                unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty,
                            via_unprotect);
            }
            if ( i % 2 == 0 ) {

                lock_entry(cache_ptr, file_ptr, i);
                unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
                local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0,
                                                   virt_num_data_entries / 2,
                                                   0, 2);
                lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                               min_idx, max_idx, 0, 100);
            }
        }

        min_idx = 0;
        max_idx = ((virt_num_data_entries / 10) /
                   ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1;
        if ( max_idx <= min_idx ) {

            max_idx = min_idx + 10;
        }

        /* dirty lock/unlock sweep over even indices */
        for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
            lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                           min_idx, max_idx, 0, 100);
        }

        /* we can't rename pinned entries, so release any local pins now. */
        local_unpin_all_entries(cache_ptr, file_ptr, FALSE);

        min_count = 10 / (file_mpi_rank + 1);
        max_count = min_count + 100;

        /* rename the first half of the entries... */
        for ( i = 0; i < (virt_num_data_entries / 2); i++ )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           (virt_num_data_entries / 20),
                                           min_count, max_count);
        }

        /* ...and then rename them back. */
        for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
        {
            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
            rename_entry(cache_ptr, file_ptr, i,
                         (i + (virt_num_data_entries / 2)));
            lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
                                           (virt_num_data_entries / 40),
                                           min_count, max_count);
        }

        /* finally, do some dirty lock/unlocks while we give the cache
         * a chance t reduce its size.
         */
        min_count = 200 / ((file_mpi_rank + 1) * (file_mpi_rank + 1));
        max_count = min_count + 100;

        for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
        {
            local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0,
                                               (virt_num_data_entries / 2),
                                               0, 5);

            lock_entry(cache_ptr, file_ptr, i);
            unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);

            if ( i > 100 ) {

                lock_and_unlock_random_entries(cache_ptr, file_ptr,
                                               (i - 100), i,
                                               min_count, max_count);
            }
        }

        /* release any local pins before we take down the cache. */
        local_unpin_all_entries(cache_ptr, file_ptr, FALSE);

        if ( fid >= 0 ) {

            if ( ! take_down_cache(fid, cache_ptr) ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }

        /* verify that all instances of datum are back where the started
         * and are clean.
         */

        for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
        {
            HDassert( data_index[i] == i );
            HDassert( ! (data[i].dirty) );
        }

        /* compose the done message -- lets server_main() terminate */
        mssg.req       = DONE_REQ_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = 0; /* not used */
        mssg.len       = 0; /* not used */
        mssg.ver       = 0; /* not used */
        mssg.magic     = MSSG_MAGIC;

        if ( success ) {


            success = send_mssg(&mssg, FALSE);

            if ( ! success ) {

                nerrors++;
                if ( verbose ) {
                    HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
                              world_mpi_rank, fcn_name);
                }
            }
        }
    }

    /* reduce error counts across ranks; rank 0 reports pass/fail */
    max_nerrors = get_max_nerrors();

    if ( world_mpi_rank == 0 ) {

        if ( max_nerrors == 0 ) {

            PASSED();

        } else {

            failures++;
            H5_FAILED();
        }
    }

    success = ( ( success ) && ( max_nerrors == 0 ) );

    return(success);

} /* smoke_check_3() */
+
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_4()
+ *
+ * Purpose: Fourth smoke check for the parallel cache.
+ *
+ * Use random reads to vary the loads on the diffferent
+ * processors. Also force different cache size adjustments.
+ *
+ * In this test, load process 0 lightly, and the other
+ * processes heavily.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/13/06
+ *
+ * Modifications:
+ *
+ * Added code intended to insure correct operation with large
+ * numbers of processors.
+ * JRM - 1/31/06
+ *
+ * Added code testing pinned insertion of entries.
+ *
+ * JRM - 8/15/06
+ *
+ *****************************************************************************/
+
+hbool_t
+smoke_check_4(void)
+{
+ const char * fcn_name = "smoke_check_4()";
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ int min_count;
+ int max_count;
+ int min_idx;
+ int max_idx;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+ struct mssg_t mssg;
+
+ if ( world_mpi_rank == 0 ) {
+
+ TESTING("smoke check #4");
+ }
+
+ nerrors = 0;
+ init_data();
+ reset_stats();
+
+ if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ if ( ! server_main() ) {
+
+ /* some error occured in the server -- report failure */
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+ else /* run the clients */
+ {
+ if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {
+
+ nerrors++;
+ fid = -1;
+ cache_ptr = NULL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+
+ min_count = 100 * (file_mpi_rank % 4);
+ max_count = min_count + 50;
+
+ for ( i = 0; i < (virt_num_data_entries / 4); i++ )
+ {
+ insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+
+ if ( i > 100 ) {
+
+ lock_and_unlock_random_entries(cache_ptr, file_ptr,
+ (i - 100), i,
+ min_count, max_count);
+ }
+ }
+
+ min_count = 10 * (file_mpi_rank % 4);
+ max_count = min_count + 100;
+
+ for ( i = (virt_num_data_entries / 4);
+ i < (virt_num_data_entries / 2);
+ i++ )
+ {
+ if ( i % 2 == 0 ) {
+
+ insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+
+ } else {
+
+ /* Insert some entries pinned, and then unpin them
+ * immediately. We have tested pinned entries elsewhere,
+ * so it should be sufficient to verify that the
+ * entries are in fact pinned (which unpin_entry() should do).
+ */
+ insert_entry(cache_ptr, file_ptr, i, H5C2__PIN_ENTRY_FLAG);
+ unpin_entry(cache_ptr, file_ptr, i, TRUE, FALSE, FALSE);
+ }
+
+ if ( i % 59 == 0 ) {
+
+ hbool_t dirty = ( (i % 2) == 0);
+
+ if ( data[i].local_pinned ) {
+ unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE);
+ }
+
+ pin_entry(cache_ptr, file_ptr, i, TRUE, dirty);
+
+ HDassert( !dirty || data[i].header.is_dirty );
+ HDassert( data[i].header.is_pinned );
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+ }
+
+ if ( i > 100 ) {
+
+ lock_and_unlock_random_entries(cache_ptr, file_ptr,
+ (i - 100), i,
+ min_count, max_count);
+ }
+
+ local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0,
+ (virt_num_data_entries / 4),
+ 0, (file_mpi_rank + 2));
+ }
+
+
+ /* flush the file to be sure that we have no problems flushing
+ * pinned entries
+ */
+ if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+
+ min_idx = 0;
+ max_idx = (((virt_num_data_entries / 10) / 4) *
+ ((file_mpi_rank % 4) + 1)) - 1;
+
+ for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
+ {
+ if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
+
+ hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
+ hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
+
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+
+ unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty, via_unprotect);
+ }
+
+ if ( i % 2 == 0 ) {
+
+ lock_entry(cache_ptr, file_ptr, i);
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(cache_ptr, file_ptr,
+ min_idx, max_idx, 0, 100);
+ }
+ }
+
+ min_idx = 0;
+ max_idx = (((virt_num_data_entries / 10) / 8) *
+ ((file_mpi_rank % 4) + 1)) - 1;
+
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
+ {
+ lock_entry(cache_ptr, file_ptr, i);
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(cache_ptr, file_ptr,
+ min_idx, max_idx, 0, 100);
+ }
+
+ /* we can't rename pinned entries, so release any local pins now. */
+ local_unpin_all_entries(cache_ptr, file_ptr, FALSE);
+
+ min_count = 10 * (file_mpi_rank % 4);
+ max_count = min_count + 100;
+
+ /* rename the first half of the entries... */
+ for ( i = 0; i < (virt_num_data_entries / 2); i++ )
+ {
+ lock_entry(cache_ptr, file_ptr, i);
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+ rename_entry(cache_ptr, file_ptr, i,
+ (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
+ (virt_num_data_entries / 20),
+ min_count, max_count);
+ }
+
+ /* ...and then rename them back. */
+ for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
+ {
+ lock_entry(cache_ptr, file_ptr, i);
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
+ rename_entry(cache_ptr, file_ptr, i,
+ (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(cache_ptr, file_ptr, 0,
+ (virt_num_data_entries / 40),
+ min_count, max_count);
+ }
+
+ /* finally, do some dirty lock/unlocks while we give the cache
+ * a chance t reduce its size.
+ */
+ min_count = 100 * (file_mpi_rank % 4);
+ max_count = min_count + 100;
+
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
+ {
+ lock_entry(cache_ptr, file_ptr, i);
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
+
+ if ( i > 100 ) {
+
+ lock_and_unlock_random_entries(cache_ptr, file_ptr,
+ (i - 100), i,
+ min_count, max_count);
+ }
+ }
+
+ if ( fid >= 0 ) {
+
+ if ( ! take_down_cache(fid, cache_ptr) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ /* verify that all instances of datum are back where they started
+ * and are clean.
+ */
+
+ for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
+ {
+ HDassert( data_index[i] == i );
+ HDassert( ! (data[i].dirty) );
+ }
+
+ /* compose the done message */
+ mssg.req = DONE_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.magic = MSSG_MAGIC;
+
+ if ( success ) {
+
+
+ success = send_mssg(&mssg, FALSE);
+
+ if ( ! success ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+ }
+
+ max_nerrors = get_max_nerrors();
+
+ if ( world_mpi_rank == 0 ) {
+
+ if ( max_nerrors == 0 ) {
+
+ PASSED();
+
+ } else {
+
+ failures++;
+ H5_FAILED();
+ }
+ }
+
+ success = ( ( success ) && ( max_nerrors == 0 ) );
+
+ return(success);
+
+} /* smoke_check_4() */
+
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_5()
+ *
+ * Purpose: Similar to smoke check 1, but modified to verify that
+ * H5AC2_mark_pinned_or_protected_entry_dirty() works in
+ * the parallel case.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 5/18/06
+ *
+ * Modifications:
+ *
+ * JRM -- 7/12/06
+ * Added test code for H5AC2_expunge_entry() and
+ * H5AC2_resize_pinned_entry().
+ *
+ *****************************************************************************/
+
+hbool_t
+smoke_check_5(void)
+{
+ const char * fcn_name = "smoke_check_5()";
+ hbool_t show_progress = FALSE;
+ hbool_t show_detailed_progress = FALSE;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ int mile_stone = 1;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+ struct mssg_t mssg;
+
+ if ( world_mpi_rank == 0 ) {
+
+ TESTING("smoke check #5");
+ }
+
+ nerrors = 0;
+ init_data();
+ reset_stats();
+
+ if ( show_progress ) { /* 1 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ if ( show_progress ) { /* 2 */
+ HDfprintf(stdout, "%d:%s - %0ds -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ if ( ! server_main() ) {
+
+ /* some error occured in the server -- report failure */
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( show_progress ) { /* 3 */
+ HDfprintf(stdout, "%d:%s - %0ds -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+ }
+ else /* run the clients */
+ {
+ if ( show_progress ) { /* 2 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {
+
+ nerrors++;
+ fid = -1;
+ cache_ptr = NULL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( show_progress ) { /* 3 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ for ( i = 0; i < (virt_num_data_entries / 2); i++ )
+ {
+ insert_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+ }
+
+ if ( show_progress ) { /* 4 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ /* flush the file so we can lock known clean entries. */
+ if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( show_progress ) { /* 5 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ for ( i = 0; i < (virt_num_data_entries / 4); i++ )
+ {
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(lock %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ lock_entry(cache_ptr, file_ptr, i);
+
+ if ( i % 2 == 0 )
+ {
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(mpoped %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ mark_pinned_or_protected_entry_dirty(cache_ptr, file_ptr, i);
+ }
+
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(unlock %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__NO_FLAGS_SET);
+
+ if ( i % 2 == 1 )
+ {
+ if ( i % 4 == 1 ) {
+
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(lock %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ lock_entry(cache_ptr, file_ptr, i);
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(unlock %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ unlock_entry(cache_ptr, file_ptr, i, H5AC2__DIRTIED_FLAG);
+ }
+
+ if ( show_detailed_progress )
+ {
+ HDfprintf(stdout, "%d:(expunge %d)\n", world_mpi_rank, i);
+ fflush(stdout);
+ }
+ expunge_entry(cache_ptr, file_ptr, i);
+ }
+ }
+
+ if ( show_progress ) { /* 6 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ for ( i = (virt_num_data_entries / 2) - 1;
+ i >= (virt_num_data_entries / 4);
+ i-- )
+ {
+ pin_entry(cache_ptr, file_ptr, i, TRUE, FALSE);
+
+ if ( i % 2 == 0 )
+ {
+ if ( i % 8 <= 4 ) {
+
+ resize_entry(cache_ptr, file_ptr, i, data[i].len / 2);
+ }
+
+ if ( i % 4 == 0 )
+ {
+ mark_pinned_or_protected_entry_dirty(cache_ptr,
+ file_ptr, i);
+ }
+ else
+ {
+ mark_pinned_entry_dirty(cache_ptr, file_ptr, i,
+ FALSE, (size_t)0);
+ }
+
+ if ( i % 8 <= 4 ) {
+
+ resize_entry(cache_ptr, file_ptr, i, data[i].len);
+ }
+ }
+
+ unpin_entry(cache_ptr, file_ptr, i, TRUE, FALSE, FALSE);
+ }
+
+ if ( show_progress ) { /* 7 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ if ( fid >= 0 ) {
+
+ if ( ! take_down_cache(fid, cache_ptr) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ if ( show_progress ) { /* 8 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ /* verify that all instance of datum are back where the started
+ * and are clean.
+ */
+
+ for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
+ {
+ HDassert( data_index[i] == i );
+ HDassert( ! (data[i].dirty) );
+ }
+
+ if ( show_progress ) { /* 9 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+
+ /* compose the done message */
+ mssg.req = DONE_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.magic = MSSG_MAGIC;
+
+ if ( success ) {
+
+ success = send_mssg(&mssg, FALSE);
+
+ if ( ! success ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ if ( show_progress ) { /* 10 */
+ HDfprintf(stdout, "%d:%s - %0d -- success = %d\n",
+ world_mpi_rank, fcn_name, mile_stone++, (int)success);
+ fflush(stdout);
+ }
+ }
+
+ max_nerrors = get_max_nerrors();
+
+ if ( world_mpi_rank == 0 ) {
+
+ if ( max_nerrors == 0 ) {
+
+ PASSED();
+
+ } else {
+
+ failures++;
+ H5_FAILED();
+ }
+ }
+
+ success = ( ( success ) && ( max_nerrors == 0 ) );
+
+ return(success);
+
+} /* smoke_check_5() */
+
+
+/*****************************************************************************
+ *
+ * Function: trace_file_check()
+ *
+ * Purpose: A basic test of the trace file capability. In essence,
+ * we invoke all operations that generate trace file output,
+ * and then verify that the expected output was generated.
+ *
+ * Note that the trace file is currently implemented at the
+ * H5AC2 level, so all calls have to go through H5AC2. Thus it
+ * is more convenient to test trace file capabilities in the
+ * parallel cache test which works at the H5AC2 level, instead
+ * of in the serial test code which does everything at the
+ * H5C2 level.
+ *
+ * The function must test trace file output in the following
+ * functions:
+ *
+ * - H5AC2_flush()
+ * - H5AC2_set()
+ * - H5AC2_mark_pinned_entry_dirty()
+ * - H5AC2_mark_pinned_or_protected_entry_dirty()
+ * H5AC2_rename()
+ * - H5AC2_pin_protected_entry()
+ * - H5AC2_protect()
+ * - H5AC2_unpin_entry()
+ * - H5AC2_unprotect()
+ * - H5AC2_set_cache_auto_resize_config()
+ * - H5AC2_expunge_entry()
+ * - H5AC2_resize_pinned_entry()
+ *
+ * This test is skipped if H5_METADATA_TRACE_FILE is undefined.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 6/13/06
+ *
+ * Modifications:
+ *
+ * JRM -- 7/11/06
+ * Updated for H5AC2_expunge_entry() and
+ * H5AC2_resize_pinned_entry().
+ *
+ *****************************************************************************/
+
+hbool_t
+trace_file_check(void)
+{
+ hbool_t success = TRUE;
+
+#ifdef H5_METADATA_TRACE_FILE
+
+ const char * fcn_name = "trace_file_check()";
+ const char * expected_output[] =
+ {
+ "### HDF5 metadata cache trace file version 1 ###\n",
+ "H5AC2_set_cache_auto_resize_config 1 0 1 0 \"t_cache2_trace.txt\" 1 0 1048576 0.500000 16777216 1048576 50000 1 0.900000 2.000000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 0\n",
+ "H5AC2_set 0x400 2 15 0x0 2 0\n",
+ "H5AC2_set 0x402 2 15 0x0 2 0\n",
+ "H5AC2_set 0x404 4 15 0x0 4 0\n",
+ "H5AC2_set 0x408 6 15 0x0 6 0\n",
+ "H5AC2_protect 0x400 2 15 H5AC2_WRITE 2 1\n",
+ "H5AC2_mark_pinned_or_protected_entry_dirty 0x400 0\n",
+ "H5AC2_unprotect 0x400 15 2 0 0\n",
+ "H5AC2_protect 0x402 2 15 H5AC2_WRITE 2 1\n",
+ "H5AC2_pin_protected_entry 0x402 0\n",
+ "H5AC2_unprotect 0x402 15 2 0 0\n",
+ "H5AC2_unpin_entry 0x402 0\n",
+ "H5AC2_expunge_entry 0x402 15 0\n",
+ "H5AC2_protect 0x404 4 15 H5AC2_WRITE 4 1\n",
+ "H5AC2_pin_protected_entry 0x404 0\n",
+ "H5AC2_unprotect 0x404 15 4 0 0\n",
+ "H5AC2_mark_pinned_entry_dirty 0x404 0 0 0\n",
+ "H5AC2_resize_pinned_entry 0x404 2 0\n",
+ "H5AC2_resize_pinned_entry 0x404 4 0\n",
+ "H5AC2_unpin_entry 0x404 0\n",
+ "H5AC2_rename 0x400 0x8e65 15 0\n",
+ "H5AC2_rename 0x8e65 0x400 15 0\n",
+ "H5AC2_flush 0x0 0\n",
+ NULL
+ };
+ char buffer[256];
+ char trace_file_name[64];
+ hbool_t done = FALSE;
+ int i;
+ int max_nerrors;
+ int expected_line_len;
+ int actual_line_len;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C2_t * cache_ptr = NULL;
+ FILE * trace_file_ptr = NULL;
+ H5AC2_cache_config_t config;
+ struct mssg_t mssg;
+
+#endif /* H5_METADATA_TRACE_FILE */
+
+ if ( world_mpi_rank == 0 ) {
+
+ TESTING("trace file collection");
+ }
+
+#ifdef H5_METADATA_TRACE_FILE
+
+ nerrors = 0;
+ init_data();
+ reset_stats();
+
+ if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ if ( ! server_main() ) {
+
+ /* some error occured in the server -- report failure */
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+ else /* run the clients */
+ {
+
+ if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr) ) {
+
+ nerrors++;
+ fid = -1;
+ cache_ptr = NULL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( nerrors == 0 ) {
+
+ config.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+ if ( H5AC2_get_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ nerrors++;
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_get_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+
+ } else {
+
+ config.open_trace_file = TRUE;
+ strcpy(config.trace_file_name, "t_cache2_trace.txt");
+
+ if ( H5AC2_set_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ nerrors++;
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_set_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ insert_entry(cache_ptr, file_ptr, 0, H5AC2__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 1, H5AC2__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 2, H5AC2__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 3, H5AC2__NO_FLAGS_SET);
+
+ lock_entry(cache_ptr, file_ptr, 0);
+ mark_pinned_or_protected_entry_dirty(cache_ptr, file_ptr, 0);
+ unlock_entry(cache_ptr, file_ptr, 0, H5AC2__NO_FLAGS_SET);
+
+ lock_entry(cache_ptr, file_ptr, 1);
+ pin_protected_entry(cache_ptr, file_ptr, 1, TRUE);
+ unlock_entry(cache_ptr, file_ptr, 1, H5AC2__NO_FLAGS_SET);
+ unpin_entry(cache_ptr, file_ptr, 1, TRUE, FALSE, FALSE);
+
+ expunge_entry(cache_ptr,file_ptr, 1);
+
+ lock_entry(cache_ptr, file_ptr, 2);
+ pin_protected_entry(cache_ptr, file_ptr, 2, TRUE);
+ unlock_entry(cache_ptr, file_ptr, 2, H5AC2__NO_FLAGS_SET);
+ mark_pinned_entry_dirty(cache_ptr, file_ptr, 2, FALSE, 0);
+ resize_entry(cache_ptr, file_ptr, 2, data[2].len / 2);
+ resize_entry(cache_ptr, file_ptr, 2, data[2].len);
+ unpin_entry(cache_ptr, file_ptr, 2, TRUE, FALSE, FALSE);
+
+ rename_entry(cache_ptr, file_ptr, 0, 20);
+ rename_entry(cache_ptr, file_ptr, 0, 20);
+
+ if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( nerrors == 0 ) {
+
+ config.version = H5AC2__CURR_CACHE_CONFIG_VERSION;
+
+ if ( H5AC2_get_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ nerrors++;
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_get_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+
+ } else {
+
+ config.open_trace_file = FALSE;
+ config.close_trace_file = TRUE;
+ config.trace_file_name[0] = '\0';
+
+ if ( H5AC2_set_cache_auto_resize_config(cache_ptr, &config)
+ != SUCCEED ) {
+
+ nerrors++;
+ HDfprintf(stdout,
+ "%d:%s: H5AC2_set_cache_auto_resize_config() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ if ( fid >= 0 ) {
+
+ if ( ! take_down_cache(fid, cache_ptr) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ /* verify that all instance of datum are back where the started
+ * and are clean.
+ */
+
+ for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
+ {
+ HDassert( data_index[i] == i );
+ HDassert( ! (data[i].dirty) );
+ }
+
+ /* compose the done message */
+ mssg.req = DONE_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.magic = MSSG_MAGIC;
+
+ if ( success ) {
+
+ success = send_mssg(&mssg, FALSE);
+
+ if ( ! success ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ if ( nerrors == 0 ) {
+
+ sprintf(trace_file_name, "t_cache2_trace.txt.%d",
+ (int)file_mpi_rank);
+
+ if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: HDfopen failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ i = 0;
+ while ( ( nerrors == 0 ) && ( ! done ) )
+ {
+ if ( expected_output[i] == NULL ) {
+
+ expected_line_len = 0;
+
+ } else {
+
+ expected_line_len = HDstrlen(expected_output[i]);
+ }
+
+ if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
+
+ actual_line_len = strlen(buffer);
+
+ } else {
+
+ actual_line_len = 0;
+ }
+
+ if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) {
+
+ done = TRUE;
+
+ } else if ( ( actual_line_len != expected_line_len ) ||
+ ( HDstrcmp(buffer, expected_output[i]) != 0 ) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%d:%s: Unexpected data in trace file line %d.\n",
+ world_mpi_rank, fcn_name, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
+ world_mpi_rank, fcn_name, expected_output[i],
+ expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
+ world_mpi_rank, fcn_name, buffer,
+ actual_line_len);
+ }
+ } else {
+ i++;
+ }
+ }
+
+ if ( trace_file_ptr != NULL ) {
+
+ HDfclose(trace_file_ptr);
+ trace_file_ptr = NULL;
+#if 1
+ HDremove(trace_file_name);
+#endif
+ }
+ }
+
+ max_nerrors = get_max_nerrors();
+
+ if ( world_mpi_rank == 0 ) {
+
+ if ( max_nerrors == 0 ) {
+
+ PASSED();
+
+ } else {
+
+ failures++;
+ H5_FAILED();
+ }
+ }
+
+ success = ( ( success ) && ( max_nerrors == 0 ) );
+
+#else /* H5_METADATA_TRACE_FILE */
+
+ if ( world_mpi_rank == 0 ) {
+
+ SKIPPED();
+
+ HDfprintf(stdout, " trace file support disabled.\n");
+ }
+
+#endif /* H5_METADATA_TRACE_FILE */
+
+ return(success);
+
+} /* trace_file_check() */
+
+
+/*****************************************************************************
+ *
+ * Function: main()
+ *
+ * Purpose: Main function for the parallel cache test.
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: JRM -- 12/23/05
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *****************************************************************************/
+
+int
+main(int argc, char **argv)
+{
+ const char * fcn_name = "main()";
+ int express_test;
+ unsigned u;
+ int mpi_size;
+ int mpi_rank;
+ int max_nerrors;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ world_mpi_size = mpi_size;
+ world_mpi_rank = mpi_rank;
+ world_server_mpi_rank = mpi_size - 1;
+ world_mpi_comm = MPI_COMM_WORLD;
+
+ H5open();
+
+ express_test = do_express_test();
+#if 0 /* JRM */
+ express_test = 0;
+#endif /* JRM */
+ if ( express_test ) {
+
+ virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
+
+ } else {
+
+ virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
+ }
+
+#ifdef H5_HAVE_MPE
+ if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); }
+ virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
+#endif /* H5_HAVE_MPE */
+
+
+ if (MAINPROCESS){
+ printf("===================================\n");
+ printf("Parallel metadata cache tests\n");
+ printf(" mpi_size = %d\n", mpi_size);
+ printf(" express_test = %d\n", express_test);
+ printf("===================================\n");
+ }
+
+ if ( mpi_size < 3 ) {
+
+ if ( MAINPROCESS ) {
+
+ printf(" Need at least 3 processes. Exiting.\n");
+ }
+ goto finish;
+ }
+
+ set_up_file_communicator();
+
+ setup_derived_types();
+
+ /* h5_fixname() will hang some processes don't participate.
+ *
+ * Thus we set up the fapl global with the world communicator,
+ * make our calls to h5_fixname(), discard the fapl, and then
+ * create it again with the file communicator.
+ */
+
+ /* setup file access property list with the world communicator */
+ if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* fix the file names */
+ for ( u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u )
+ {
+ if ( h5_fixname(FILENAME[u], fapl, filenames[u],
+ sizeof(filenames[u])) == NULL ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ break;
+ }
+ }
+
+ /* close the fapl before we set it up again */
+ if ( H5Pclose(fapl) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ /* now create the fapl again, excluding the server process. */
+ if ( world_mpi_rank != world_server_mpi_rank ) {
+
+ /* setup file access property list */
+ if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+
+ if ( H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
+ world_mpi_rank, fcn_name);
+ }
+ }
+ }
+
+ setup_rand();
+
+ max_nerrors = get_max_nerrors();
+
+ if ( max_nerrors != 0 ) {
+
+ /* errors in setup -- no point in continuing */
+
+ if ( world_mpi_rank == 0 ) {
+
+ HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
+ }
+ goto finish;
+ }
+
+ /* run the tests */
+#if 1
+ server_smoke_check();
+#endif
+#if 1
+ smoke_check_1();
+#endif
+#if 1
+ smoke_check_2();
+#endif
+#if 1
+ smoke_check_3();
+#endif
+#if 1
+ smoke_check_4();
+#endif
+#if 1
+ smoke_check_5();
+#endif
+#if 1
+ trace_file_check();
+#endif
+
+finish:
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (MAINPROCESS){ /* only process 0 reports */
+ printf("===================================\n");
+ if (failures){
+ printf("***metadata cache tests detected %d failures***\n",
+ failures);
+ }
+ else{
+ printf("metadata cache tests finished with no failures\n");
+ }
+ printf("===================================\n");
+ }
+
+ /* close HDF5 library */
+ H5close();
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+ /* cannot just return (failures) because exit code is limited to 1byte */
+ return(failures != 0);
+}
+