From 8a7b9b3221f192ed0e64b00c3a90d5e6a1fb9e19 Mon Sep 17 00:00:00 2001 From: John Mainzer Date: Fri, 28 Apr 2006 08:27:54 -0500 Subject: [svn-r12311] Purpose: Add pinned entry capability to cache. Description: For frequently accessed cache entries, the protect/unprotect overhead is sometimes a bottleneck. Solution: Allow entries to be pinned in the cache. Pinned entries can't be evicted, but can be flushed or modified. Platforms tested: h5committested -- minus one small typo in test/cache.c whose fix was tested on copper and heping only. Misc. update: --- MANIFEST | 1 + src/H5AC.c | 520 +++-- src/H5ACpkg.h | 315 +++ src/H5ACprivate.h | 28 +- src/H5C.c | 1651 +++++++++++---- src/H5Cpkg.h | 101 +- src/H5Cprivate.h | 59 +- src/H5Edefin.h | 3 + src/H5Einit.h | 15 + src/H5Epubgen.h | 6 + src/H5Eterm.h | 3 + src/H5err.txt | 3 + test/cache.c | 5626 +++++++++++++++++++++++++++++++++++++++++---------- test/cache_common.c | 333 ++- test/cache_common.h | 69 + testpar/t_cache.c | 1178 +++++++++-- 16 files changed, 8103 insertions(+), 1808 deletions(-) create mode 100644 src/H5ACpkg.h diff --git a/MANIFEST b/MANIFEST index 0abd403..924e7fd 100644 --- a/MANIFEST +++ b/MANIFEST @@ -396,6 +396,7 @@ ./src/H5Aprivate.h ./src/H5Apublic.h ./src/H5AC.c +./src/H5ACpkg.h ./src/H5ACprivate.h ./src/H5ACpublic.h ./src/H5B.c diff --git a/src/H5AC.c b/src/H5AC.c index 0c288b5..1889c85 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -43,6 +43,7 @@ */ #define H5C_PACKAGE /*suppress error about including H5Cpkg */ +#define H5AC_PACKAGE /*suppress error about including H5ACpkg */ #define H5F_PACKAGE /*suppress error about including H5Fpkg */ /* Interface initialization */ @@ -53,7 +54,7 @@ #endif /* H5_HAVE_PARALLEL */ #include "H5private.h" /* Generic Functions */ -#include "H5ACprivate.h" /* Metadata cache */ +#include "H5ACpkg.h" /* Metadata cache */ #include "H5Cpkg.h" /* Cache */ #include "H5Dprivate.h" /* Dataset functions */ #include "H5Eprivate.h" /* Error handling */ @@ -64,268 +65,9 @@ #include "H5MMprivate.h" /* Memory management */ #include "H5Pprivate.h" /* Property lists */ -#define H5AC_DEBUG_DIRTY_BYTES_CREATION 0 - -/*------------------------------------------------------------------------- - * It is a bit difficult to set ranges of allowable values on the - * dirty_bytes_threshold field of H5AC_aux_t. The following are - * probably broader than they should be. - *------------------------------------------------------------------------- - */ - -#define H5AC__MIN_DIRTY_BYTES_THRESHOLD (int32_t) \ - (H5C__MIN_MAX_CACHE_SIZE / 2) -#define H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD (256 * 1024) -#define H5AC__MAX_DIRTY_BYTES_THRESHOLD (int32_t) \ - (H5C__MAX_MAX_CACHE_SIZE / 4) - -/**************************************************************************** - * - * structure H5AC_aux_t - * - * While H5AC has become a wrapper for the cache implemented in H5C.c, there - * are some features of the metadata cache that are specific to it, and which - * therefore do not belong in the more generic H5C cache code. - * - * In particular, there is the matter of synchronizing writes from the - * metadata cache to disk in the PHDF5 case. - * - * Prior to this update, the presumption was that all metadata caches would - * write the same data at the same time since all operations modifying - * metadata must be performed collectively. Given this assumption, it was - * safe to allow only the writes from process 0 to actually make it to disk, - * while metadata writes from all other processes were discarded. 
- * - * Unfortunately, this presumption is in error as operations that read - * metadata need not be collective, but can change the location of dirty - * entries in the metadata cache LRU lists. This can result in the same - * metadata write operation triggering writes from the metadata caches on - * some processes, but not all (causing a hang), or in different sets of - * entries being written from different caches (potentially resulting in - * metadata corruption in the file). - * - * To deal with this issue, I decided to apply a paradigm shift to the way - * metadata is written to disk. - * - * With this set of changes, only the metadata cache on process 0 is able - * to write metadata to disk, although metadata caches on all other - * processes can read metadata from disk as before. - * - * To keep all the other caches from getting plugged up with dirty metadata, - * process 0 periodically broadcasts a list of entries that it has flushed - * since that last notice, and which are currently clean. The other caches - * mark these entries as clean as well, which allows them to evict the - * entries as needed. - * - * One obvious problem in this approach is synchronizing the broadcasts - * and receptions, as different caches may see different amounts of - * activity. - * - * The current solution is for the caches to track the number of bytes - * of newly generated dirty metadata, and to broadcast and receive - * whenever this value exceeds some user specified threshold. - * - * Maintaining this count is easy for all processes not on process 0 -- - * all that is necessary is to add the size of the entry to the total - * whenever there is an insertion, a rename of a previously clean entry, - * or whever a previously clean entry is marked dirty in an unprotect. - * - * On process 0, we have to be careful not to count dirty bytes twice. - * If an entry is marked dirty, flushed, and marked dirty again, all - * within a single reporting period, it only th first marking should - * be added to the dirty bytes generated tally, as that is all that - * the other processes will see. - * - * At present, this structure exists to maintain the fields needed to - * implement the above scheme, and thus is only used in the parallel - * case. However, other uses may arise in the future. - * - * Instance of this structure are associated with metadata caches via - * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is - * responsible for allocating, maintaining, and discarding instances - * of H5AC_aux_t. - * - * The remainder of this header comments documents the individual fields - * of the structure. - * - * JRM - 6/27/05 - * - * magic: Unsigned 32 bit integer always set to - * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate - * pointers to instances of H5AC_aux_t. - * - * mpi_comm: MPI communicator associated with the file for which the - * cache has been created. - * - * mpi_rank: MPI rank of this process within mpi_comm. - * - * mpi_size: Number of processes in mpi_comm. - * - * write_permitted: Boolean flag used to control whether the cache - * is permitted to write to file. - * - * dirty_bytes_threshold: Integer field containing the dirty bytes - * generation threashold. 
Whenever dirty byte creation - * exceeds this value, the metadata cache on process 0 - * broadcasts a list of the entries it has flushed since - * the last broadcast (or since the beginning of execution) - * and which are currently clean (if they are still in the - * cache) - * - * Similarly, metadata caches on processes other than process - * 0 will attempt to receive a list of clean entries whenever - * the threshold is exceeded. - * - * dirty_bytes: Integer field containing the number of bytes of dirty - * metadata generated since the beginning of the computation, - * or (more typically) since the last clean entries list - * broadcast. This field is reset to zero after each such - * broadcast. - * - * dirty_bytes_propagations: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of times the cleaned list - * has been propagated from process 0 to the other - * processes. - * - * unprotect_dirty_bytes: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of dirty bytes created - * via unprotect operations since the last time the cleaned - * list was propagated. - * - * unprotect_dirty_bytes_updates: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of times dirty bytes have - * been created via unprotect operations since the last time - * the cleaned list was propagated. - * - * insert_dirty_bytes: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of dirty bytes created - * via insert operations since the last time the cleaned - * list was propagated. - * - * insert_dirty_bytes_updates: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of times dirty bytes have - * been created via insert operations since the last time - * the cleaned list was propagated. - * - * rename_dirty_bytes: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of dirty bytes created - * via rename operations since the last time the cleaned - * list was propagated. - * - * rename_dirty_bytes_updates: This field only exists when the - * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. - * - * It is used to track the number of times dirty bytes have - * been created via rename operations since the last time - * the cleaned list was propagated. - * - * d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list - * of entries that have been dirtied since the last time they - * were listed in a clean entries broadcast. This list is - * only maintained by the metadata cache on process 0 -- it - * it used to maintain a view of the dirty entries as seen - * by the other caches, so as to keep the dirty bytes count - * in synchronization with them. - * - * Thus on process 0, the dirty_bytes count is incremented - * only if either - * - * 1) an entry is inserted in the metadata cache, or - * - * 2) a previously clean entry is renamed, and it does not - * already appear in the dirty entry list, or - * - * 3) a previously clean entry is unprotected with the - * dirtied flag set and the entry does not already appear - * in the dirty entry list. - * - * Entries are added to the dirty entry list whever they cause - * the dirty bytes count to be increased. 
They are removed - * when they appear in a clean entries broadcast. Note that - * renames must be reflected in the dirty entry list. - * - * To reitterate, this field is only used on process 0 -- it - * should be NULL on all other processes. - * - * d_slist_len: Integer field containing the number of entries in the - * dirty entry list. This field should always contain the - * value 0 on all processes other than process 0. It exists - * primarily for sanity checking. - * - * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list - * of entries that were dirty, have been flushed - * to disk since the last clean entries broadcast, and are - * still clean. Since only process 0 can write to disk, this - * list only exists on process 0. - * - * In essence, this slist is used to assemble the contents of - * the next clean entries broadcast. The list emptied after - * each broadcast. - * - * c_slist_len: Integer field containing the number of entries in the clean - * entries list (*c_slist_ptr). This field should always - * contain the value 0 on all processes other than process 0. - * It exists primarily for sanity checking. - * - ****************************************************************************/ #ifdef H5_HAVE_PARALLEL -#define H5AC__H5AC_AUX_T_MAGIC (unsigned)0x00D0A01 - -typedef struct H5AC_aux_t -{ - uint32_t magic; - - MPI_Comm mpi_comm; - - int mpi_rank; - - int mpi_size; - - hbool_t write_permitted; - - int32_t dirty_bytes_threshold; - - int32_t dirty_bytes; - -#if H5AC_DEBUG_DIRTY_BYTES_CREATION - - int32_t dirty_bytes_propagations; - - int32_t unprotect_dirty_bytes; - int32_t unprotect_dirty_bytes_updates; - - int32_t insert_dirty_bytes; - int32_t insert_dirty_bytes_updates; - - int32_t rename_dirty_bytes; - int32_t rename_dirty_bytes_updates; - -#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */ - - H5SL_t * d_slist_ptr; - - int32_t d_slist_len; - - H5SL_t * c_slist_ptr; - - int32_t c_slist_len; - -} H5AC_aux_t; /* struct H5AC_aux_t */ - /* Declare a free list to manage the H5AC_aux_t struct */ H5FL_DEFINE_STATIC(H5AC_aux_t); @@ -1221,6 +963,88 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_get_entry_status + * + * Purpose: Given a file address, determine whether the metadata + * cache contains an entry at that location. If it does, + * also determine whether the entry is dirty, protected, + * pinned, etc. and return that information to the caller + * in *status_ptr. + * + * If the specified entry doesn't exist, set *status_ptr + * to zero. + * + * On error, the value of *status_ptr is undefined. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/27/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +herr_t +H5AC_get_entry_status(H5C_t * cache_ptr, + haddr_t addr, + unsigned * status_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + herr_t result; + hbool_t in_cache; + hbool_t is_dirty; + hbool_t is_protected; + hbool_t is_pinned; + size_t entry_size; + unsigned status = 0; + + FUNC_ENTER_NOAPI(H5AC_get_entry_status, FAIL) + + if ( ( cache_ptr == NULL ) || + ( cache_ptr->magic != H5C__H5C_T_MAGIC ) || + ( ! 
H5F_addr_defined(addr) ) || + ( status_ptr == NULL ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.") + } + + result = H5C_get_entry_status(cache_ptr, addr, &entry_size, &in_cache, + &is_dirty, &is_protected, &is_pinned); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "H5C_get_entry_status() failed.") + } + + if ( in_cache ) { + + status |= H5AC_ES__IN_CACHE; + + if ( is_dirty ) + status |= H5AC_ES__IS_DIRTY; + + if ( is_protected ) + status |= H5AC_ES__IS_PROTECTED; + + if ( is_pinned ) + status |= H5AC_ES__IS_PINNED; + } + + *status_ptr = status; + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5AC_get_entry_status() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_set * * Purpose: Adds the specified thing to the cache. The thing need not @@ -1365,6 +1189,99 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_mark_pinned_entry_dirty + * + * Purpose: Mark a pinned entry as dirty. The target entry MUST be + * be pinned, and MUST be unprotected. + * + * If the entry has changed size, the function updates + * data structures for the size change. + * + * If the entry is not already dirty, the function places + * the entry on the skip list. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/11/06 + * + * Modifications: + * + * None + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_mark_pinned_entry_dirty(H5C_t * cache_ptr, + void * thing, + hbool_t size_changed, + size_t new_size) +{ + herr_t result; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5AC_mark_pinned_entry_dirty, FAIL) + +#ifdef H5_HAVE_PARALLEL + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( thing ); + + if ( ( ((H5AC_info_t *)thing)->is_dirty == FALSE ) && + ( NULL != cache_ptr->aux_ptr) ) { + + H5AC_info_t * entry_ptr; + + HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) ); + + entry_ptr = (H5AC_info_t *)thing; + + if ( ! ( entry_ptr->is_pinned ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \ + "Entry isn't pinned??") + } + + if ( entry_ptr->is_protected ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \ + "Entry is protected??") + } + + result = H5AC_log_dirtied_entry(cache_ptr, + entry_ptr, + entry_ptr->addr, + size_changed, + new_size); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "H5AC_log_dirtied_entry() failed.") + } + } +#endif /* H5_HAVE_PARALLEL */ + + result = H5C_mark_pinned_entry_dirty(cache_ptr, + thing, + size_changed, + new_size); + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \ + "H5C_mark_pinned_entry_dirty() failed.") + + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5AC_mark_pinned_entry_dirty() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_rename * * Purpose: Use this function to notify the cache that an object's @@ -1474,6 +1391,47 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_pin_protected_entry() + * + * Purpose: Pin a protected cache entry. The entry must be protected + * at the time of call, and must be unpinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/27/06 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_pin_protected_entry(H5C_t * cache_ptr, + void * thing) +{ + herr_t result; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5AC_pin_protected_entry, FAIL) + + result = H5C_pin_protected_entry(cache_ptr, thing); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \ + "H5C_pin_protected_entry() failed.") + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5AC_pin_protected_entry() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_protect * * Purpose: If the target entry is not in the cache, load it. If @@ -1579,6 +1537,46 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_unpin_entry() + * + * Purpose: Unpin a cache entry. The entry must be unprotected at + * the time of call, and must be pinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/11/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_unpin_entry(H5C_t * cache_ptr, + void * thing) +{ + herr_t result; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5AC_unpin_entry, FAIL) + + result = H5C_unpin_entry(cache_ptr, thing); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "H5C_unpin_entry() failed.") + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5AC_unpin_entry() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_unprotect * * Purpose: Undo an H5AC_protect() call -- specifically, mark the @@ -3212,7 +3210,7 @@ H5AC_log_renamed_entry(H5AC_t * cache_ptr, /* get entry status, size, etc here */ if ( H5C_get_entry_status(cache_ptr, old_addr, &entry_size, &entry_in_cache, - &entry_dirty, NULL) < 0 ) { + &entry_dirty, NULL, NULL) < 0 ) { HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.") diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h new file mode 100644 index 0000000..903eca3 --- /dev/null +++ b/src/H5ACpkg.h @@ -0,0 +1,315 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: John Mainzer -- 4/19/06 + * + * Purpose: This file contains declarations which are normally visible + * only within the H5AC package (just H5AC.c at present). + * + * Source files outside the H5AC package should include + * H5ACprivate.h instead. + * + * The one exception to this rule is testpar/t_cache.c. The + * test code is easier to write if it can look at H5AC_aux_t. + * Indeed, this is the main reason why this file was created. 
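+ *
+ *              As with the other package headers, a source file gains
+ *              access to this header by defining the associated package
+ *              macro before the include, as H5AC.c now does:
+ *
+ *                  #define H5AC_PACKAGE
+ *                  #include "H5ACpkg.h"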
+ */ + +#ifndef H5AC_PACKAGE +#error "Do not include this file outside the H5AC package!" +#endif + +#ifndef _H5ACpkg_H +#define _H5ACpkg_H + +#define H5C_PACKAGE /*suppress error about including H5Cpkg */ + +/* Get package's private header */ +#include "H5Cprivate.h" + + +/* Get needed headers */ +#include "H5Cpkg.h" /* Cache */ +#include "H5SLprivate.h" /* Skip lists */ + +#ifdef H5_HAVE_PARALLEL +#include +#endif /* H5_HAVE_PARALLEL */ + + +#define H5AC_DEBUG_DIRTY_BYTES_CREATION 0 + +/*------------------------------------------------------------------------- + * It is a bit difficult to set ranges of allowable values on the + * dirty_bytes_threshold field of H5AC_aux_t. The following are + * probably broader than they should be. + *------------------------------------------------------------------------- + */ + +#define H5AC__MIN_DIRTY_BYTES_THRESHOLD (int32_t) \ + (H5C__MIN_MAX_CACHE_SIZE / 2) +#define H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD (256 * 1024) +#define H5AC__MAX_DIRTY_BYTES_THRESHOLD (int32_t) \ + (H5C__MAX_MAX_CACHE_SIZE / 4) + +/**************************************************************************** + * + * structure H5AC_aux_t + * + * While H5AC has become a wrapper for the cache implemented in H5C.c, there + * are some features of the metadata cache that are specific to it, and which + * therefore do not belong in the more generic H5C cache code. + * + * In particular, there is the matter of synchronizing writes from the + * metadata cache to disk in the PHDF5 case. + * + * Prior to this update, the presumption was that all metadata caches would + * write the same data at the same time since all operations modifying + * metadata must be performed collectively. Given this assumption, it was + * safe to allow only the writes from process 0 to actually make it to disk, + * while metadata writes from all other processes were discarded. + * + * Unfortunately, this presumption is in error as operations that read + * metadata need not be collective, but can change the location of dirty + * entries in the metadata cache LRU lists. This can result in the same + * metadata write operation triggering writes from the metadata caches on + * some processes, but not all (causing a hang), or in different sets of + * entries being written from different caches (potentially resulting in + * metadata corruption in the file). + * + * To deal with this issue, I decided to apply a paradigm shift to the way + * metadata is written to disk. + * + * With this set of changes, only the metadata cache on process 0 is able + * to write metadata to disk, although metadata caches on all other + * processes can read metadata from disk as before. + * + * To keep all the other caches from getting plugged up with dirty metadata, + * process 0 periodically broadcasts a list of entries that it has flushed + * since that last notice, and which are currently clean. The other caches + * mark these entries as clean as well, which allows them to evict the + * entries as needed. + * + * One obvious problem in this approach is synchronizing the broadcasts + * and receptions, as different caches may see different amounts of + * activity. + * + * The current solution is for the caches to track the number of bytes + * of newly generated dirty metadata, and to broadcast and receive + * whenever this value exceeds some user specified threshold. 
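+ *
+ * In outline (illustrative pseudo code only -- the real logic lives in
+ * H5AC.c, and the field names below are those of H5AC_aux_t):
+ *
+ *      if ( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) {
+ *
+ *          if ( aux_ptr->mpi_rank == 0 )
+ *              broadcast the list of entries flushed and still clean;
+ *          else
+ *              receive the clean entries list and mark those entries clean;
+ *
+ *          aux_ptr->dirty_bytes = 0;
+ *      }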
+ * + * Maintaining this count is easy for all processes not on process 0 -- + * all that is necessary is to add the size of the entry to the total + * whenever there is an insertion, a rename of a previously clean entry, + * or whever a previously clean entry is marked dirty in an unprotect. + * + * On process 0, we have to be careful not to count dirty bytes twice. + * If an entry is marked dirty, flushed, and marked dirty again, all + * within a single reporting period, it only th first marking should + * be added to the dirty bytes generated tally, as that is all that + * the other processes will see. + * + * At present, this structure exists to maintain the fields needed to + * implement the above scheme, and thus is only used in the parallel + * case. However, other uses may arise in the future. + * + * Instance of this structure are associated with metadata caches via + * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is + * responsible for allocating, maintaining, and discarding instances + * of H5AC_aux_t. + * + * The remainder of this header comments documents the individual fields + * of the structure. + * + * JRM - 6/27/05 + * + * magic: Unsigned 32 bit integer always set to + * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate + * pointers to instances of H5AC_aux_t. + * + * mpi_comm: MPI communicator associated with the file for which the + * cache has been created. + * + * mpi_rank: MPI rank of this process within mpi_comm. + * + * mpi_size: Number of processes in mpi_comm. + * + * write_permitted: Boolean flag used to control whether the cache + * is permitted to write to file. + * + * dirty_bytes_threshold: Integer field containing the dirty bytes + * generation threashold. Whenever dirty byte creation + * exceeds this value, the metadata cache on process 0 + * broadcasts a list of the entries it has flushed since + * the last broadcast (or since the beginning of execution) + * and which are currently clean (if they are still in the + * cache) + * + * Similarly, metadata caches on processes other than process + * 0 will attempt to receive a list of clean entries whenever + * the threshold is exceeded. + * + * dirty_bytes: Integer field containing the number of bytes of dirty + * metadata generated since the beginning of the computation, + * or (more typically) since the last clean entries list + * broadcast. This field is reset to zero after each such + * broadcast. + * + * dirty_bytes_propagations: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of times the cleaned list + * has been propagated from process 0 to the other + * processes. + * + * unprotect_dirty_bytes: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of dirty bytes created + * via unprotect operations since the last time the cleaned + * list was propagated. + * + * unprotect_dirty_bytes_updates: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of times dirty bytes have + * been created via unprotect operations since the last time + * the cleaned list was propagated. + * + * insert_dirty_bytes: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of dirty bytes created + * via insert operations since the last time the cleaned + * list was propagated. 
+ * + * insert_dirty_bytes_updates: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of times dirty bytes have + * been created via insert operations since the last time + * the cleaned list was propagated. + * + * rename_dirty_bytes: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of dirty bytes created + * via rename operations since the last time the cleaned + * list was propagated. + * + * rename_dirty_bytes_updates: This field only exists when the + * H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE. + * + * It is used to track the number of times dirty bytes have + * been created via rename operations since the last time + * the cleaned list was propagated. + * + * d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list + * of entries that have been dirtied since the last time they + * were listed in a clean entries broadcast. This list is + * only maintained by the metadata cache on process 0 -- it + * it used to maintain a view of the dirty entries as seen + * by the other caches, so as to keep the dirty bytes count + * in synchronization with them. + * + * Thus on process 0, the dirty_bytes count is incremented + * only if either + * + * 1) an entry is inserted in the metadata cache, or + * + * 2) a previously clean entry is renamed, and it does not + * already appear in the dirty entry list, or + * + * 3) a previously clean entry is unprotected with the + * dirtied flag set and the entry does not already appear + * in the dirty entry list. + * + * Entries are added to the dirty entry list whever they cause + * the dirty bytes count to be increased. They are removed + * when they appear in a clean entries broadcast. Note that + * renames must be reflected in the dirty entry list. + * + * To reitterate, this field is only used on process 0 -- it + * should be NULL on all other processes. + * + * d_slist_len: Integer field containing the number of entries in the + * dirty entry list. This field should always contain the + * value 0 on all processes other than process 0. It exists + * primarily for sanity checking. + * + * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list + * of entries that were dirty, have been flushed + * to disk since the last clean entries broadcast, and are + * still clean. Since only process 0 can write to disk, this + * list only exists on process 0. + * + * In essence, this slist is used to assemble the contents of + * the next clean entries broadcast. The list emptied after + * each broadcast. + * + * c_slist_len: Integer field containing the number of entries in the clean + * entries list (*c_slist_ptr). This field should always + * contain the value 0 on all processes other than process 0. + * It exists primarily for sanity checking. 
+ * + ****************************************************************************/ + +#ifdef H5_HAVE_PARALLEL + +#define H5AC__H5AC_AUX_T_MAGIC (unsigned)0x00D0A01 + +typedef struct H5AC_aux_t +{ + uint32_t magic; + + MPI_Comm mpi_comm; + + int mpi_rank; + + int mpi_size; + + hbool_t write_permitted; + + int32_t dirty_bytes_threshold; + + int32_t dirty_bytes; + +#if H5AC_DEBUG_DIRTY_BYTES_CREATION + + int32_t dirty_bytes_propagations; + + int32_t unprotect_dirty_bytes; + int32_t unprotect_dirty_bytes_updates; + + int32_t insert_dirty_bytes; + int32_t insert_dirty_bytes_updates; + + int32_t rename_dirty_bytes; + int32_t rename_dirty_bytes_updates; + +#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */ + + H5SL_t * d_slist_ptr; + + int32_t d_slist_len; + + H5SL_t * c_slist_ptr; + + int32_t c_slist_len; + +} H5AC_aux_t; /* struct H5AC_aux_t */ + +#endif /* H5_HAVE_PARALLEL */ + +#endif /* _H5Cpkg_H */ diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 9d89621..a6844e4 100644 --- a/src/H5ACprivate.h +++ b/src/H5ACprivate.h @@ -221,23 +221,45 @@ extern hid_t H5AC_ind_dxpl_id; #define H5AC__DELETED_FLAG H5C__DELETED_FLAG #define H5AC__DIRTIED_FLAG H5C__DIRTIED_FLAG #define H5AC__SIZE_CHANGED_FLAG H5C__SIZE_CHANGED_FLAG +#define H5AC__PIN_ENTRY_FLAG H5C__PIN_ENTRY_FLAG +#define H5AC__UNPIN_ENTRY_FLAG H5C__UNPIN_ENTRY_FLAG #define H5AC__FLUSH_INVALIDATE_FLAG H5C__FLUSH_INVALIDATE_FLAG #define H5AC__FLUSH_CLEAR_ONLY_FLAG H5C__FLUSH_CLEAR_ONLY_FLAG #define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG +/* #defines of flags used to report entry status in the + * H5AC_get_entry_status() call. + */ + +#define H5AC_ES__IN_CACHE 0x0001 +#define H5AC_ES__IS_DIRTY 0x0002 +#define H5AC_ES__IS_PROTECTED 0x0004 +#define H5AC_ES__IS_PINNED 0x0008 + + +/* external function declarations: */ H5_DLL herr_t H5AC_init(void); H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr); +H5_DLL herr_t H5AC_get_entry_status(H5C_t * cache_ptr, haddr_t addr, + unsigned * status_ptr); H5_DLL herr_t H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing, unsigned int flags); -H5_DLL void *H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, - haddr_t addr, const void *udata1, void *udata2, - H5AC_protect_t rw); +H5_DLL herr_t H5AC_pin_protected_entry(H5C_t * cache_ptr, void * thing); +H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, + haddr_t addr, const void *udata1, void *udata2, + H5AC_protect_t rw); +H5_DLL herr_t H5AC_unpin_entry(H5C_t * cache_ptr, + void * thing); H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing, unsigned flags); H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags); +H5_DLL herr_t H5AC_mark_pinned_entry_dirty(H5C_t * cache_ptr, + void * thing, + hbool_t size_changed, + size_t new_size); H5_DLL herr_t H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_addr); diff --git a/src/H5C.c b/src/H5C.c index 120dd7d..fcc9e49 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -143,7 +143,7 @@ * * One could argue that I should have given the epoch markers a positive * size, but this would break the index_size = LRU_list_size + pl_size - * invarient. + * + pel_size invarient. * * Alternatively, I could pass the current decr_mode in to the macro, * and just skip the check whenever epoch markers may be in use. 
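For reference, the expected client-side use of the pin calls declared in H5ACprivate.h above is sketched below. This is an illustration only: f, dxpl_id, type, addr and the udata arguments stand in for a real cache client's values, error handling is reduced to comments, and the metadata cache is assumed to be reachable through the usual f->shared->cache pointer (H5AC_WRITE and H5AC__NO_FLAGS_SET are the pre-existing protect/unprotect defaults).

    void * thing;
    unsigned status = 0;

    /* an entry must be protected at the time it is pinned ... */
    if ( NULL == (thing = H5AC_protect(f, dxpl_id, type, addr,
                                       udata1, udata2, H5AC_WRITE)) )
        { /* handle error */ }

    if ( H5AC_pin_protected_entry(f->shared->cache, thing) < 0 )
        { /* handle error */ }

    /* ... and is then unprotected as usual, remaining pinned */
    if ( H5AC_unprotect(f, dxpl_id, type, addr, thing,
                        H5AC__NO_FLAGS_SET) < 0 )
        { /* handle error */ }

    /* while pinned, the entry may be modified in place -- the cache is
     * notified via H5AC_mark_pinned_entry_dirty() (no size change here)
     */
    if ( H5AC_mark_pinned_entry_dirty(f->shared->cache, thing,
                                      FALSE, (size_t)0) < 0 )
        { /* handle error */ }

    /* the new status query reports the pin */
    if ( H5AC_get_entry_status(f->shared->cache, addr, &status) < 0 )
        { /* handle error */ }
    HDassert( status & H5AC_ES__IN_CACHE );
    HDassert( status & H5AC_ES__IS_PINNED );

    /* unpin (again while unprotected) when the entry no longer needs
     * eviction protection
     */
    if ( H5AC_unpin_entry(f->shared->cache, thing) < 0 )
        { /* handle error */ }

The new H5AC__PIN_ENTRY_FLAG and H5AC__UNPIN_ENTRY_FLAG defines suggest that the same requests can also be folded into the unprotect flags, avoiding the separate calls when the caller already holds the protect.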
@@ -528,6 +528,11 @@ if ( ( (entry_ptr) == NULL ) || \ * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as * the cache hit rate stats are always collected and available. * + * Changes: + * + * JRM -- 3/21/06 + * Added / updated macros for pinned entry related stats. + * ***********************************************************************/ #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ @@ -538,6 +543,9 @@ if ( ( (entry_ptr) == NULL ) || \ #if H5C_COLLECT_CACHE_STATS +#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \ + (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++; + #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ @@ -558,7 +566,11 @@ if ( ( (entry_ptr) == NULL ) || \ if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; #define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr) \ (((cache_ptr)->renames)[(entry_ptr)->type->id])++; @@ -591,19 +603,29 @@ if ( ( (entry_ptr) == NULL ) || \ (cache_ptr)->total_failed_ht_search_depth += depth; \ } +#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ + ((cache_ptr)->unpins)[(entry_ptr)->type->id]++; + #if H5C_COLLECT_CACHE_ENTRY_STATS #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ (entry_ptr)->accesses = 0; \ (entry_ptr)->clears = 0; \ - (entry_ptr)->flushes = 0; - -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ - (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ + (entry_ptr)->flushes = 0; \ + (entry_ptr)->pins = 0; + +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ + (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ + } \ ((entry_ptr)->clears)++; -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ + (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ + } \ ((entry_ptr)->flushes)++; #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ @@ -633,6 +655,11 @@ if ( ( (entry_ptr) == NULL ) || \ ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ = (entry_ptr)->size; \ } \ + if ( (entry_ptr)->pins > \ + ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) { \ + ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \ + = (entry_ptr)->pins; \ + } #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ if ( hit ) \ @@ -654,15 +681,29 @@ if ( ( (entry_ptr) == NULL ) || \ } \ ((entry_ptr)->accesses)++; +#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ + ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + (entry_ptr)->pins++; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; + #else 
/* H5C_COLLECT_CACHE_ENTRY_STATS */ #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ + } \ (((cache_ptr)->clears)[(entry_ptr)->type->id])++; -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ + (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ + } #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; @@ -681,11 +722,19 @@ if ( ( (entry_ptr) == NULL ) || \ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; +#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ + ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; + #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ #else /* H5C_COLLECT_CACHE_STATS */ #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) +#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) #define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) @@ -697,6 +746,8 @@ if ( ( (entry_ptr) == NULL ) || \ #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) +#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) +#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ @@ -1139,7 +1190,11 @@ if ( ( (cache_ptr) == NULL ) || \ * * Modifications: * - * None. + * JRM -- 3/20/06 + * Modified macro to ignore pinned entries. Pinned entries + * do not appear in the data structures maintained by the + * replacement policy code, and thus this macro has nothing + * to do if called for such an entry. * *------------------------------------------------------------------------- */ @@ -1154,50 +1209,55 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the head.\ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* Use the dirty flag to infer whether the entry is on the clean or \ - * dirty LRU list, and remove it. Then insert it at the head of the \ - * same LRU list. 
\ - * \ - * At least initially, all entries should be clean. That may change, \ - * so we may as well deal with both cases now. \ - */ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ + /* Use the dirty flag to infer whether the entry is on the clean or \ + * dirty LRU list, and remove it. Then insert it at the head of \ + * the same LRU list. \ + * \ + * At least initially, all entries should be clean. That may \ + * change, so we may as well deal with both cases now. \ + */ \ \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ + if ( (entry_ptr)->is_dirty ) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1210,20 +1270,25 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the head \ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ + /* End modified LRU specific code. 
*/ \ + } \ } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1261,6 +1326,10 @@ if ( ( (cache_ptr) == NULL ) || \ * dirty LRU lists, and the other not. Yet another attempt * at optimization. * + * JRM - 3/20/06 + * Pinned entries can't be evicted, so this entry should never + * be called on a pinned entry. Added assert to verify this. + * *------------------------------------------------------------------------- */ @@ -1272,6 +1341,7 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* modified LRU specific code */ \ @@ -1311,6 +1381,7 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* modified LRU specific code */ \ @@ -1354,10 +1425,16 @@ if ( ( (cache_ptr) == NULL ) || \ * pre-processor, I'll have to remove them. * * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and + * Split macro into two versions, one supporting the clean and * dirty LRU lists, and the other not. Yet another attempt * at optimization. * + * JRM - 3/20/06 + * While pinned entries can be flushed, they don't reside in + * the replacement policy data structures when unprotected. + * Thus I modified this macro to do nothing if the entry is + * pinned. + * *------------------------------------------------------------------------- */ @@ -1371,47 +1448,54 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the \ + * head. \ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* since the entry is being flushed or cleared, one would think that it \ - * must be dirty -- but that need not be the case. Use the dirty flag \ - * to infer whether the entry is on the clean or dirty LRU list, and \ - * remove it. Then insert it at the head of the clean LRU list. \ - * \ - * The function presumes that a dirty entry will be either cleared or \ - * flushed shortly, so it is OK if we put a dirty entry on the clean \ - * LRU list. 
\ - */ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ + /* since the entry is being flushed or cleared, one would think \ + * that it must be dirty -- but that need not be the case. Use the \ + * dirty flag to infer whether the entry is on the clean or dirty \ + * LRU list, and remove it. Then insert it at the head of the \ + * clean LRU list. \ + * \ + * The function presumes that a dirty entry will be either cleared \ + * or flushed shortly, so it is OK if we put a dirty entry on the \ + * clean LRU list. \ + */ \ \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ + if ( (entry_ptr)->is_dirty ) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ \ - /* End modified LRU specific code. */ \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__UPDATE_RP_FOR_FLUSH */ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1424,20 +1508,26 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the \ + * head. \ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__UPDATE_RP_FOR_FLUSH */ #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1475,6 +1565,10 @@ if ( ( (cache_ptr) == NULL ) || \ * dirty LRU lists, and the other not. Yet another attempt * at optimization. * + * JRM - 3/10/06 + * This macro should never be called on a pinned entry. + * Inserted an assert to verify this. 
+ * *------------------------------------------------------------------------- */ @@ -1486,6 +1580,7 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* modified LRU specific code */ \ @@ -1524,6 +1619,7 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* modified LRU specific code */ \ @@ -1577,6 +1673,11 @@ if ( ( (cache_ptr) == NULL ) || \ * dirty LRU lists, and the other not. Yet another attempt * at optimization. * + * JRM - 3/17/06 + * Modified macro to attempt to remove pinned entriese from + * the pinned entry list instead of from the data structures + * maintained by the replacement policy. + * *------------------------------------------------------------------------- */ @@ -1589,38 +1690,49 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ \ - /* modified LRU specific code */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ \ - /* remove the entry from the LRU list. */ \ + } else { \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* modified LRU specific code */ \ \ - /* Similarly, remove the entry from the clean or dirty LRU list \ - * as appropriate. \ - */ \ + /* remove the entry from the LRU list. */ \ \ - if ( (entry_ptr)->is_dirty ) { \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ + /* Similarly, remove the entry from the clean or dirty LRU list \ + * as appropriate. \ + */ \ \ - } else { \ + if ( (entry_ptr)->is_dirty ) { \ \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + } else { \ \ - /* End modified LRU specific code. */ \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ + } \ \ - /* Regardless of the replacement policy, now add the entry to the \ - * protected list. \ + /* Regardless of the replacement policy, or whether the entry is \ + * pinned, now add the entry to the protected list. 
\ */ \ \ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ @@ -1638,19 +1750,30 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ \ - /* modified LRU specific code */ \ + /* modified LRU specific code */ \ \ - /* remove the entry from the LRU list. */ \ + /* remove the entry from the LRU list. */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + /* End modified LRU specific code. */ \ + } \ \ - /* Regardless of the replacement policy, now add the entry to the \ - * protected list. \ + /* Regardless of the replacement policy, or whether the entry is \ + * pinned, now add the entry to the protected list. \ */ \ \ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ @@ -1705,6 +1828,12 @@ if ( ( (cache_ptr) == NULL ) || \ * to allow it to function correctly should that policy * be relaxed in the future. * + * JRM - 3/17/06 + * Modified macro to do nothing if the entry is pinned. + * In this case, the entry is on the pinned entry list, not + * in the replacement policy data structures, so there is + * nothing to be done. + * *------------------------------------------------------------------------- */ @@ -1718,57 +1847,62 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the head. 
\ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* remove the entry from either the clean or dirty LUR list as \ - * indicated by the was_dirty parameter \ - */ \ - if ( was_dirty ) { \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ + /* remove the entry from either the clean or dirty LUR list as \ + * indicated by the was_dirty parameter \ + */ \ + if ( was_dirty ) { \ \ - } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ + } else { \ \ - /* insert the entry at the head of either the clean or dirty LRU list \ - * as appropriate. \ - */ \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ \ - if ( (entry_ptr)->is_dirty ) { \ + /* insert the entry at the head of either the clean or dirty LRU \ + * list as appropriate. \ + */ \ \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ + if ( (entry_ptr)->is_dirty ) { \ \ - } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ + } else { \ \ - /* End modified LRU specific code. */ \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__UPDATE_RP_FOR_RENAME */ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1781,20 +1915,25 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( !((entry_ptr)->is_protected) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - /* modified LRU specific code */ \ + if ( ! ((entry_ptr)->is_pinned) ) { \ \ - /* remove the entry from the LRU list, and re-insert it at the head. */ \ + /* modified LRU specific code */ \ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + /* remove the entry from the LRU list, and re-insert it at the head. 
\ + */ \ \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__UPDATE_RP_FOR_RENAME */ #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1802,59 +1941,48 @@ if ( ( (cache_ptr) == NULL ) || \ /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_UNPROTECT + * Macro: H5C__UPDATE_RP_FOR_UNPIN * * Purpose: Update the replacement policy data structures for an - * unprotect of the specified cache entry. + * unpin of the specified cache entry. * * To do this, unlink the specified entry from the protected - * list, and re-insert it in the data structures used by the - * current replacement policy. + * entry list, and re-insert it in the data structures used + * by the current replacement policy. * * At present, we only support the modified LRU policy, so * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function + * we ever support other replacement policies, the macro * should switch on the current policy and act accordingly. * * Return: N/A * - * Programmer: John Mainzer, 5/19/04 + * Programmer: John Mainzer, 3/22/06 * * Modifications: * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_unprotect() to - * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to - * squeeze a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. + * None. * *------------------------------------------------------------------------- */ #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ { \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ + HDassert( ! ((entry_ptr)->is_protected) ); \ + HDassert( (entry_ptr)->is_pinned); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* Regardless of the replacement policy, remove the entry from the \ - * protected list. \ + * pinned entry list. \ */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ \ /* modified LRU specific code */ \ \ @@ -1886,24 +2014,25 @@ if ( ( (cache_ptr) == NULL ) || \ \ /* End modified LRU specific code. 
*/ \ \ -} /* H5C__UPDATE_RP_FOR_UNPROTECT */ +} /* H5C__UPDATE_RP_FOR_UNPIN */ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ { \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ + HDassert( ! ((entry_ptr)->is_protected) ); \ + HDassert( (entry_ptr)->is_pinned); \ HDassert( (entry_ptr)->size > 0 ); \ \ /* Regardless of the replacement policy, remove the entry from the \ - * protected list. \ + * pinned entry list. \ */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ \ /* modified LRU specific code */ \ \ @@ -1916,6 +2045,151 @@ if ( ( (cache_ptr) == NULL ) || \ \ /* End modified LRU specific code. */ \ \ +} /* H5C__UPDATE_RP_FOR_UNPIN */ + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_UNPROTECT + * + * Purpose: Update the replacement policy data structures for an + * unprotect of the specified cache entry. + * + * To do this, unlink the specified entry from the protected + * list, and re-insert it in the data structures used by the + * current replacement policy. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. + * + * Return: N/A + * + * Programmer: John Mainzer, 5/19/04 + * + * Modifications: + * + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_unprotect() to + * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to + * squeeze a bit more performance out of the cache. + * + * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * pre-processor, I'll have to remove them. + * + * JRM - 7/28/04 + * Split macro into two version, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/17/06 + * Modified macro to put pinned entries on the pinned entry + * list instead of inserting them in the data structures + * maintained by the replacement policy. + * + *------------------------------------------------------------------------- + */ + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + +#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->is_protected); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * protected list. 
\ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Similarly, insert the entry at the head of either the clean or \ + * dirty LRU list as appropriate. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ + } \ + \ +} /* H5C__UPDATE_RP_FOR_UNPROTECT */ + +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->is_protected); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * protected list. \ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ + } \ } /* H5C__UPDATE_RP_FOR_UNPROTECT */ #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1974,6 +2248,12 @@ static herr_t H5C_flush_single_entry(H5F_t * f, hbool_t * first_flush_ptr, hbool_t del_entry_from_slist_on_destroy); +static herr_t H5C_flush_invalidate_cache(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + unsigned flags); + static void * H5C_load_entry(H5F_t * f, hid_t dxpl_id, const H5C_class_t * type, @@ -2176,6 +2456,9 @@ done: * JRM -- 1/20/06 * Added initialization of the new prefix field in H5C_t. * + * JRM -- 3/16/06 + * Added initialization for the pinned entry related fields. 
+ * *------------------------------------------------------------------------- */ @@ -2259,6 +2542,11 @@ H5C_create(size_t max_cache_size, cache_ptr->pl_head_ptr = NULL; cache_ptr->pl_tail_ptr = NULL; + cache_ptr->pel_len = 0; + cache_ptr->pel_size = (size_t)0; + cache_ptr->pel_head_ptr = NULL; + cache_ptr->pel_tail_ptr = NULL; + cache_ptr->LRU_list_len = 0; cache_ptr->LRU_list_size = (size_t)0; cache_ptr->LRU_head_ptr = NULL; @@ -2320,6 +2608,7 @@ H5C_create(size_t max_cache_size, ((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class; ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE; ((cache_ptr->epoch_markers)[i]).is_protected = FALSE; + ((cache_ptr->epoch_markers)[i]).is_pinned = FALSE; ((cache_ptr->epoch_markers)[i]).in_slist = FALSE; ((cache_ptr->epoch_markers)[i]).ht_next = NULL; ((cache_ptr->epoch_markers)[i]).ht_prev = NULL; @@ -2331,6 +2620,7 @@ H5C_create(size_t max_cache_size, ((cache_ptr->epoch_markers)[i]).accesses = 0; ((cache_ptr->epoch_markers)[i]).clears = 0; ((cache_ptr->epoch_markers)[i]).flushes = 0; + ((cache_ptr->epoch_markers)[i]).pins = 0; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ } @@ -2721,12 +3011,15 @@ done: * and then restoring LRU order. * * However, it is possible that the cache will contain other, - * unmarked entries, when we make this call. This new flag - * allows us to ignore this. + * unmarked protected entries, when we make this call. This + * new flag allows us to ignore them. * * Note that even with this flag set, it is still an error * to try to flush a protected entry. * + * JRM -- 3/25/065 + * Updated function to handle pinned entries. + * *------------------------------------------------------------------------- */ herr_t @@ -2744,7 +3037,6 @@ H5C_flush_cache(H5F_t * f, hbool_t ignore_protected; hbool_t tried_to_flush_protected_entry = FALSE; int32_t protected_entries = 0; - int32_t i; H5SL_node_t * node_ptr = NULL; H5C_cache_entry_t * entry_ptr = NULL; #if H5C_DO_SANITY_CHECKS @@ -2773,146 +3065,75 @@ H5C_flush_cache(H5F_t * f, HDassert( ! ( destroy && ignore_protected ) ); - if ( ( destroy ) && ( cache_ptr->epoch_markers_active > 0 ) ) { + if ( destroy ) { - status = H5C__autoadjust__ageout__remove_all_markers(cache_ptr); + status = H5C_flush_invalidate_cache(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + flags); - if ( status != SUCCEED ) { + if ( status < 0 ) { - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "error removing all epoch markers.") + /* This shouldn't happen -- if it does, we are toast so + * just scream and die. + */ + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "flush invalidate failed.") } - } - - - if ( cache_ptr->slist_len == 0 ) { - - node_ptr = NULL; - HDassert( cache_ptr->slist_size == 0 ); - } else { - node_ptr = H5SL_first(cache_ptr->slist_ptr); - -#if H5C_DO_SANITY_CHECKS - /* H5C_flush_single_entry() now removes dirty entries from the - * slist as it flushes them. Thus for sanity checks we must - * make note of the initial slist length and size before we - * do any flushes. - */ - initial_slist_len = cache_ptr->slist_len; - initial_slist_size = cache_ptr->slist_size; -#endif /* H5C_DO_SANITY_CHECKS */ - - } + if ( cache_ptr->slist_len == 0 ) { - while ( node_ptr != NULL ) - { - entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + node_ptr = NULL; + HDassert( cache_ptr->slist_size == 0 ); - /* increment node pointer now, before we delete its target - * from the slist. 
- */ - node_ptr = H5SL_next(node_ptr); + } else { - HDassert( entry_ptr != NULL ); - HDassert( entry_ptr->in_slist ); + node_ptr = H5SL_first(cache_ptr->slist_ptr); #if H5C_DO_SANITY_CHECKS - actual_slist_len++; - actual_slist_size += entry_ptr->size; + /* H5C_flush_single_entry() now removes dirty entries from the + * slist as it flushes them. Thus for sanity checks we must + * make note of the initial slist length and size before we + * do any flushes. + */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; #endif /* H5C_DO_SANITY_CHECKS */ - if ( ( ! flush_marked_entries ) || ( entry_ptr->flush_marker ) ) { - - if ( entry_ptr->is_protected ) { - - /* we probably have major problems -- but lets flush - * everything we can before we decide whether to flag - * an error. - */ - tried_to_flush_protected_entry = TRUE; - protected_entries++; - - } else { - - status = H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - NULL, - entry_ptr->addr, - flags, - &first_flush, - FALSE); - if ( status < 0 ) { - - /* This shouldn't happen -- if it does, we are toast so - * just scream and die. - */ - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ - "Can't flush entry.") - } - } } - } /* while */ -#if H5C_DO_SANITY_CHECKS - HDassert( actual_slist_len == initial_slist_len ); - HDassert( actual_slist_size == initial_slist_size ); - - if ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) { + while ( node_ptr != NULL ) + { + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - HDassert( cache_ptr->slist_len == initial_slist_len ); - HDassert( cache_ptr->slist_size == initial_slist_size ); + /* increment node pointer now, before we delete its target + * from the slist. + */ + node_ptr = H5SL_next(node_ptr); - } else if ( ! flush_marked_entries ) { + HDassert( entry_ptr != NULL ); + HDassert( entry_ptr->in_slist ); - HDassert( cache_ptr->slist_len == 0 ); - HDassert( cache_ptr->slist_size == 0 ); - } +#if H5C_DO_SANITY_CHECKS + actual_slist_len++; + actual_slist_size += entry_ptr->size; #endif /* H5C_DO_SANITY_CHECKS */ - if ( destroy ) { - - if(cache_ptr->slist_ptr) { - - /* Release all nodes from skip list, but keep list active */ - H5SL_release(cache_ptr->slist_ptr); - - } - cache_ptr->slist_len = 0; - cache_ptr->slist_size = 0; - - /* Since we are doing a destroy, we must make a pass through - * the hash table and flush all entries that remain. Note that - * all remaining entries entries must be clean, so this will - * not result in any writes to disk. - */ - for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) - { - while ( cache_ptr->index[i] ) - { - entry_ptr = cache_ptr->index[i]; + if ( ( ! flush_marked_entries ) || ( entry_ptr->flush_marker ) ) { if ( entry_ptr->is_protected ) { - /* we have major problems -- but lets flush and destroy - * everything we can before we flag an error. + /* we probably have major problems -- but lets flush + * everything we can before we decide whether to flag + * an error. 
*/ + tried_to_flush_protected_entry = TRUE; + protected_entries++; - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) - - if ( !entry_ptr->in_slist ) { - - protected_entries++; - HDassert( !(entry_ptr->is_dirty) ); - } } else { - HDassert( !(entry_ptr->is_dirty) ); - HDassert( !(entry_ptr->in_slist) ); - status = H5C_flush_single_entry(f, primary_dxpl_id, secondary_dxpl_id, @@ -2932,42 +3153,28 @@ H5C_flush_cache(H5F_t * f, } } } - } - - HDassert( protected_entries == cache_ptr->pl_len ); - - if ( protected_entries > 0 ) - { - /* the caller asked us to flush and destroy a cache that - * contains one or more protected entries. Since we can't - * flush protected entries, we haven't destroyed them either. - * Since they are all on the protected list, just re-insert - * them into the cache before we flag an error. - */ - entry_ptr = cache_ptr->pl_head_ptr; + } /* while */ - while ( entry_ptr != NULL ) - { - entry_ptr->in_slist = FALSE; - - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) +#if H5C_DO_SANITY_CHECKS + HDassert( actual_slist_len == initial_slist_len ); + HDassert( actual_slist_size == initial_slist_size ); - if ( entry_ptr->is_dirty ) { + if ( ! flush_marked_entries ) { - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) - } - entry_ptr = entry_ptr->next; - } + HDassert( cache_ptr->slist_len == 0 ); + HDassert( cache_ptr->slist_size == 0 ); } - } +#endif /* H5C_DO_SANITY_CHECKS */ - HDassert( protected_entries <= cache_ptr->pl_len ); + HDassert( protected_entries <= cache_ptr->pl_len ); - if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) ) - || - ( tried_to_flush_protected_entry ) ) { + if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) ) + || + ( tried_to_flush_protected_entry ) ) { - HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "cache has protected items") + HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \ + "cache has protected items") + } } done: @@ -3395,6 +3602,9 @@ done: * * Modifications: * + * JRM -- 4/26/06 + * Added the is_pinned_ptr parameter and supporting code. + * *------------------------------------------------------------------------- */ @@ -3404,7 +3614,8 @@ H5C_get_entry_status(H5C_t * cache_ptr, size_t * size_ptr, hbool_t * in_cache_ptr, hbool_t * is_dirty_ptr, - hbool_t * is_protected_ptr) + hbool_t * is_protected_ptr, + hbool_t * is_pinned_ptr) { herr_t ret_value = SUCCEED; /* Return value */ H5C_cache_entry_t * entry_ptr = NULL; @@ -3451,6 +3662,11 @@ H5C_get_entry_status(H5C_t * cache_ptr, *is_protected_ptr = entry_ptr->is_protected; } + + if ( is_pinned_ptr != NULL ) { + + *is_pinned_ptr = entry_ptr->is_pinned; + } } done: @@ -3512,6 +3728,10 @@ done: * Added support for the new write_permitted field of * the H5C_t structure. * + * JRM -- 3/16/06 + * Added initialization for the new is_pinned field of the + * H5C_cache_entry_t structure. + * *------------------------------------------------------------------------- */ @@ -3693,6 +3913,8 @@ H5C_insert_entry(H5F_t * f, entry_ptr->is_protected = FALSE; + entry_ptr->is_pinned = FALSE; + H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) /* New entries are presumed to be dirty, so this if statement is @@ -3776,6 +3998,9 @@ done: * Leave the old code in place for now (commented out) for * benchmarking. * + * JRM -- 4/13/06 + * Updated function to deal with pinned entries. 
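/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * The hunk that follows extends H5C_mark_entries_as_clean() to scan
 * the new pinned entry list in addition to the LRU.  The toy model
 * below (hypothetical names, no HDF5 dependencies) shows the idea:
 * entries are first tagged clear_on_unprotect, then each list of
 * unprotected entries is walked and every tagged entry is cleared on
 * the spot; protected entries are left for a later unprotect.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_entry {
    int               clear_on_unprotect;
    struct toy_entry *next;
};

/* Clear every tagged entry on one list; return the number cleared. */
static int toy_clear_list(struct toy_entry *head)
{
    int cleared = 0;
    for (struct toy_entry *e = head; e != NULL; e = e->next) {
        if (e->clear_on_unprotect) {
            e->clear_on_unprotect = 0;   /* clear-only "flush" of the entry */
            cleared++;
        }
    }
    return cleared;
}

int main(void)
{
    struct toy_entry lru_a = {1, NULL}, lru_b = {1, NULL};  /* LRU list          */
    struct toy_entry pel_a = {1, NULL};                     /* pinned entry list */

    lru_a.next = &lru_b;

    /* scan the LRU first, then the pinned entry list, as in the patch */
    printf("cleared %d entries\n", toy_clear_list(&lru_a) + toy_clear_list(&pel_a));
    return 0;
}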
+ * *------------------------------------------------------------------------- */ @@ -3796,6 +4021,9 @@ H5C_mark_entries_as_clean(H5F_t * f, int initial_list_len; haddr_t addr; #if H5C_DO_SANITY_CHECKS + int pinned_entries_marked = 0; + int protected_entries_marked = 0; + int other_entries_marked = 0; haddr_t last_addr; #endif /* H5C_DO_SANITY_CHECKS */ H5C_cache_entry_t * clear_ptr = NULL; @@ -3895,17 +4123,31 @@ H5C_mark_entries_as_clean(H5F_t * f, HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.") } } -#else /* modified code -- commented out for now */ +#else /* modified code */ } else { /* Mark the entry to be cleared on unprotect. We will * scan the LRU list shortly, and clear all those entries * not currently protected. */ entry_ptr->clear_on_unprotect = TRUE; +#if H5C_DO_SANITY_CHECKS + if ( entry_ptr->is_protected ) { + + protected_entries_marked++; + + } else if ( entry_ptr->is_pinned ) { + + pinned_entries_marked++; + + } else { + + other_entries_marked++; + } +#endif /* H5C_DO_SANITY_CHECKS */ } #endif /* end modified code */ } -#if 1 /* modified code -- commented out for now */ +#if 1 /* modified code */ /* Scan through the LRU list from back to front, and flush the * entries whose clear_on_unprotect flags are set. Observe that * any protected entries will not be on the LRU, and therefore @@ -3947,6 +4189,48 @@ H5C_mark_entries_as_clean(H5F_t * f, entries_examined++; } +#if H5C_DO_SANITY_CHECKS + HDassert( entries_cleared == other_entries_marked ); +#endif /* H5C_DO_SANITY_CHECKS */ + + /* It is also possible that some of the cleared entries are on the + * pinned list. Must scan that also. + */ + + entry_ptr = cache_ptr->pel_head_ptr; + + while ( entry_ptr != NULL ) + { + if ( entry_ptr->clear_on_unprotect ) { + + entry_ptr->clear_on_unprotect = FALSE; + clear_ptr = entry_ptr; + entry_ptr = entry_ptr->next; + entries_cleared++; + + if ( H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + clear_ptr->type, + clear_ptr->addr, + H5C__FLUSH_CLEAR_ONLY_FLAG, + &first_flush, + TRUE) < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.") + } + } else { + + entry_ptr = entry_ptr->next; + } + } + +#if H5C_DO_SANITY_CHECKS + HDassert( entries_cleared == pinned_entries_marked + other_entries_marked ); + HDassert( entries_cleared + protected_entries_marked == ce_array_len ); +#endif /* H5C_DO_SANITY_CHECKS */ + HDassert( ( entries_cleared == ce_array_len ) || ( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) ); @@ -3963,7 +4247,7 @@ H5C_mark_entries_as_clean(H5F_t * f, } HDassert( (entries_cleared + i) == ce_array_len ); #endif /* H5C_DO_SANITY_CHECKS */ -#endif /* modified code -- commented out for now */ +#endif /* modified code */ done: @@ -3983,6 +4267,103 @@ done: /*------------------------------------------------------------------------- + * Function: H5C_mark_pinned_entry_dirty + * + * Purpose: Mark a pinned entry as dirty. The target entry MUST be + * be pinned, and MUST be unprotected. + * + * If the entry has changed size, the function updates + * data structures for the size change. + * + * If the entry is not already dirty, the function places + * the entry on the skip list. 
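/*
 * Editor's note -- illustrative sketch, not part of the patch.  The new
 * H5C_mark_pinned_entry_dirty() defined below updates the pinned entry
 * list, hash table, and skip list totals when a pinned entry changes
 * size.  This hypothetical, self-contained model shows just the
 * bookkeeping rule those update macros implement: every aggregate that
 * counts the entry's bytes is adjusted by (new_size - old_size) before
 * the entry's own size field is overwritten.
 */
#include <assert.h>
#include <stddef.h>

struct toy_entry {
    size_t size;
    int    is_pinned;
    int    is_protected;
    int    is_dirty;
    int    in_slist;
};

struct toy_totals {
    int    pel_len;
    size_t pel_size;    /* bytes on the pinned entry list   */
    size_t index_size;  /* bytes tracked by the hash table  */
    size_t slist_size;  /* bytes tracked by the skip list   */
};

static int toy_mark_pinned_entry_dirty(struct toy_totals *t,
                                       struct toy_entry  *e,
                                       int size_changed, size_t new_size)
{
    if (!e->is_pinned || e->is_protected)
        return -1;                          /* analogous to HGOTO_ERROR */

    e->is_dirty = 1;

    if (size_changed && e->size != new_size) {
        t->pel_size   += new_size - e->size;
        t->index_size += new_size - e->size;
        if (e->in_slist)
            t->slist_size += new_size - e->size;
        e->size = new_size;
    }

    if (!e->in_slist)
        e->in_slist = 1;                    /* model of the skip list insert */

    return 0;
}

int main(void)
{
    struct toy_totals t = {1, 100, 100, 0};
    struct toy_entry  e = {100, 1, 0, 0, 0};

    assert(toy_mark_pinned_entry_dirty(&t, &e, 1, 150) == 0);
    assert(t.pel_size == 150 && e.is_dirty && e.in_slist);
    return 0;
}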
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 3/22/06 + * + * Modifications: + * + * None + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr, + void * thing, + hbool_t size_changed, + size_t new_size) +{ + herr_t ret_value = SUCCEED; /* Return value */ + H5C_cache_entry_t * entry_ptr; + + FUNC_ENTER_NOAPI(H5C_mark_pinned_entry_dirty, FAIL) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( thing ); + HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) ); + + entry_ptr = (H5C_cache_entry_t *)thing; + + if ( ! ( entry_ptr->is_pinned ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \ + "Entry isn't pinned??") + } + + if ( entry_ptr->is_protected ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \ + "Entry is protected??") + } + + /* mark the entry as dirty if it isn't already */ + entry_ptr->is_dirty = TRUE; + + /* update for change in entry size if necessary */ + if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) { + + /* update the protected entry list */ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \ + (cache_ptr->pel_size), \ + (entry_ptr->size), (new_size)); + + /* update the hash table */ + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\ + (new_size)); + + /* if the entry is in the skip list, update that too */ + if ( entry_ptr->in_slist ) { + + H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\ + (new_size)); + } + + /* update statistics just before changing the entry size */ + H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \ + (new_size)); + + /* finally, update the entry size proper */ + entry_ptr->size = new_size; + } + + if ( ! (entry_ptr->in_slist) ) { + + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) + } + + H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_mark_pinned_entry_dirty() */ + + +/*------------------------------------------------------------------------- * * Function: H5C_rename_entry * @@ -4005,6 +4386,12 @@ done: * moving management of the is_dirty field of * H5C_cache_entry_t into the H5C code. * + * JRM -- 4/3/06 + * Updated function to disallow renaming of pinned entries. + * + * JRM -- 4/27/06 + * Updated function to support renaming of pinned entries. + * *------------------------------------------------------------------------- */ @@ -4039,14 +4426,20 @@ H5C_rename_entry(H5C_t * cache_ptr, H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL) - if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) + if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) { /* the old item doesn't exist in the cache, so we are done. */ HGOTO_DONE(SUCCEED) + } HDassert( entry_ptr->addr == old_addr ); HDassert( entry_ptr->type == type ); - HDassert( !(entry_ptr->is_protected) ); + + if ( entry_ptr->is_protected ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \ + "Target entry is protected.") + } H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL) @@ -4091,14 +4484,7 @@ H5C_rename_entry(H5C_t * cache_ptr, H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) - /* remove this if statement once this set of mods - * is up and running. 
-- JRM - */ - - if ( entry_ptr->is_dirty ) { - - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) - } + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) H5C__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, FAIL) @@ -4116,7 +4502,64 @@ done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_rename_entry() */ +} /* H5C_rename_entry() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_pin_protected_entry() + * + * Purpose: Pin a protected cache entry. The entry must be protected + * at the time of call, and must be unpinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/26/06 + * + * Modifications: + * + * JRM -- 4/26/06 + * Modified routine to allow it to operate on protected + * entries. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_pin_protected_entry(H5C_t * cache_ptr, + void * thing) +{ + herr_t ret_value = SUCCEED; /* Return value */ + H5C_cache_entry_t * entry_ptr; + + FUNC_ENTER_NOAPI(H5C_pin_protected_entry, FAIL) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( thing ); + + entry_ptr = (H5C_cache_entry_t *)thing; + + HDassert( H5F_addr_defined(entry_ptr->addr) ); + + if ( ! ( entry_ptr->is_protected ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected") + } + + if ( entry_ptr->is_pinned ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry is already pinned") + } + + entry_ptr->is_pinned = TRUE; + + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_pin_protected_entry() */ /*------------------------------------------------------------------------- @@ -4242,7 +4685,7 @@ H5C_protect(H5F_t * f, entry_ptr = (H5C_cache_entry_t *)thing; - /* try to free up some space if necessay */ + /* try to free up some space if necessary */ if ( (cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size ) { @@ -4851,6 +5294,9 @@ done: * Added code to use the prefix field of H5C_t to allow * tagging of statistics output. * + * JRM -- 3/21/06 + * Added code supporting the pinned entry related stats. 
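/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * statistics reported below draw on new per-type counters (pins,
 * unpins, dirty_pins, pinned_flushes, pinned_clears, max_pins) added
 * to H5C_t.  The update macros themselves are not visible in this
 * excerpt; the hypothetical helpers below only model the bookkeeping
 * they presumably perform when an entry of a given type id is pinned
 * or unpinned.
 */
#define TOY_MAX_NUM_TYPE_IDS 16

struct toy_stats {
    long pins[TOY_MAX_NUM_TYPE_IDS + 1];
    long unpins[TOY_MAX_NUM_TYPE_IDS + 1];
    int  max_pins[TOY_MAX_NUM_TYPE_IDS + 1];   /* per-entry lifetime maximum */
};

struct toy_entry {
    int type_id;
    int pins;      /* times this particular entry has been pinned */
};

static void toy_update_stats_for_pin(struct toy_stats *s, struct toy_entry *e)
{
    s->pins[e->type_id]++;
    e->pins++;
    if (e->pins > s->max_pins[e->type_id])
        s->max_pins[e->type_id] = e->pins;
}

static void toy_update_stats_for_unpin(struct toy_stats *s, const struct toy_entry *e)
{
    s->unpins[e->type_id]++;   /* the per-entry pin count is a lifetime total */
}

int main(void)
{
    struct toy_stats s = {{0}, {0}, {0}};
    struct toy_entry e = {3, 0};

    toy_update_stats_for_pin(&s, &e);
    toy_update_stats_for_pin(&s, &e);
    toy_update_stats_for_unpin(&s, &e);

    return (s.pins[3] == 2 && s.unpins[3] == 1 && s.max_pins[3] == 2) ? 0 : 1;
}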
+ * *------------------------------------------------------------------------- */ @@ -4876,11 +5322,17 @@ H5C_stats(H5C_t * cache_ptr, int64_t total_renames = 0; int64_t total_size_increases = 0; int64_t total_size_decreases = 0; + int64_t total_pins = 0; + int64_t total_unpins = 0; + int64_t total_dirty_pins = 0; + int64_t total_pinned_flushes = 0; + int64_t total_pinned_clears = 0; int32_t aggregate_max_accesses = 0; int32_t aggregate_min_accesses = 1000000; int32_t aggregate_max_clears = 0; int32_t aggregate_max_flushes = 0; size_t aggregate_max_size = 0; + int32_t aggregate_max_pins = 0; double hit_rate; double average_successful_search_depth = 0.0; double average_failed_search_depth = 0.0; @@ -4911,6 +5363,11 @@ H5C_stats(H5C_t * cache_ptr, total_renames += cache_ptr->renames[i]; total_size_increases += cache_ptr->size_increases[i]; total_size_decreases += cache_ptr->size_decreases[i]; + total_pins += cache_ptr->pins[i]; + total_unpins += cache_ptr->unpins[i]; + total_dirty_pins += cache_ptr->dirty_pins[i]; + total_pinned_flushes += cache_ptr->pinned_flushes[i]; + total_pinned_clears += cache_ptr->pinned_clears[i]; #if H5C_COLLECT_CACHE_ENTRY_STATS if ( aggregate_max_accesses < cache_ptr->max_accesses[i] ) aggregate_max_accesses = cache_ptr->max_accesses[i]; @@ -4924,6 +5381,8 @@ H5C_stats(H5C_t * cache_ptr, aggregate_max_flushes = cache_ptr->max_flushes[i]; if ( aggregate_max_size < cache_ptr->max_size[i] ) aggregate_max_size = cache_ptr->max_size[i]; + if ( aggregate_max_pins < cache_ptr->max_pins[i] ) + aggregate_max_pins = cache_ptr->max_pins[i]; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ } @@ -4998,6 +5457,14 @@ H5C_stats(H5C_t * cache_ptr, (long)(cache_ptr->max_pl_len)); HDfprintf(stdout, + "%s current (max) PEL size / length = %ld (%ld) / %ld (%ld)\n", + cache_ptr->prefix, + (long)(cache_ptr->pel_size), + (long)(cache_ptr->max_pel_size), + (long)(cache_ptr->pel_len), + (long)(cache_ptr->max_pel_len)); + + HDfprintf(stdout, "%s current LRU list size / length = %ld / %ld\n", cache_ptr->prefix, (long)(cache_ptr->LRU_list_size), @@ -5039,6 +5506,18 @@ H5C_stats(H5C_t * cache_ptr, (long)total_size_increases, (long)total_size_decreases); + HDfprintf(stdout, + "%s Total entry pins (dirty) / unpins = %ld (%ld) / %ld\n", + cache_ptr->prefix, + (long)total_pins, + (long)total_dirty_pins, + (long)total_unpins); + + HDfprintf(stdout, "%s Total pinned flushes / clears = %ld / %ld\n", + cache_ptr->prefix, + (long)total_pinned_flushes, + (long)total_pinned_clears); + #if H5C_COLLECT_CACHE_ENTRY_STATS HDfprintf(stdout, "%s aggregate max / min accesses = %d / %d\n", @@ -5051,10 +5530,10 @@ H5C_stats(H5C_t * cache_ptr, (int)aggregate_max_clears, (int)aggregate_max_flushes); - HDfprintf(stdout, "%s aggregate max_size = %d\n", + HDfprintf(stdout, "%s aggregate max_size / max_pins = %d / %d\n", cache_ptr->prefix, - (int)aggregate_max_size); - + (int)aggregate_max_size, + (int)aggregate_max_pins); #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ @@ -5103,6 +5582,18 @@ H5C_stats(H5C_t * cache_ptr, (long)(cache_ptr->size_increases[i]), (long)(cache_ptr->size_decreases[i])); + HDfprintf(stdout, + "%s entry pins / unpins = %ld / %ld\n", + cache_ptr->prefix, + (long)(cache_ptr->pins[i]), + (long)(cache_ptr->unpins[i])); + + HDfprintf(stdout, + "%s entry dirty pins/pin'd flushes = %ld / %ld\n", + cache_ptr->prefix, + (long)(cache_ptr->dirty_pins[i]), + (long)(cache_ptr->pinned_flushes[i])); + #if H5C_COLLECT_CACHE_ENTRY_STATS HDfprintf(stdout, @@ -5118,9 +5609,10 @@ H5C_stats(H5C_t * cache_ptr, 
cache_ptr->max_flushes[i]); HDfprintf(stdout, - "%s entry max_size = %d\n", + "%s entry max_size / max_pins = %d / %d\n", cache_ptr->prefix, - (int)(cache_ptr->max_size[i])); + (int)(cache_ptr->max_size[i]), + (int)(cache_ptr->max_pins[i])); #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ @@ -5156,6 +5648,9 @@ done: * JRM - 9/8/05 * Updated for size increase / decrease statistics. * + * JRM - 3/20/06 + * Updated for pin / unpin related statistics. + * *------------------------------------------------------------------------- */ @@ -5179,6 +5674,11 @@ H5C_stats__reset(H5C_t * cache_ptr) cache_ptr->flushes[i] = 0; cache_ptr->evictions[i] = 0; cache_ptr->renames[i] = 0; + cache_ptr->pins[i] = 0; + cache_ptr->unpins[i] = 0; + cache_ptr->dirty_pins[i] = 0; + cache_ptr->pinned_flushes[i] = 0; + cache_ptr->pinned_clears[i] = 0; cache_ptr->size_increases[i] = 0; cache_ptr->size_decreases[i] = 0; } @@ -5199,6 +5699,9 @@ H5C_stats__reset(H5C_t * cache_ptr) cache_ptr->max_pl_len = 0; cache_ptr->max_pl_size = (size_t)0; + cache_ptr->max_pel_len = 0; + cache_ptr->max_pel_size = (size_t)0; + #if H5C_COLLECT_CACHE_ENTRY_STATS for ( i = 0; i <= cache_ptr->max_type_id; i++ ) @@ -5208,6 +5711,7 @@ H5C_stats__reset(H5C_t * cache_ptr) cache_ptr->max_clears[i] = 0; cache_ptr->max_flushes[i] = 0; cache_ptr->max_size[i] = (size_t)0; + cache_ptr->max_pins[i] = 0; } #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ @@ -5219,6 +5723,61 @@ H5C_stats__reset(H5C_t * cache_ptr) /*------------------------------------------------------------------------- + * Function: H5C_unpin_entry() + * + * Purpose: Unpin a cache entry. The entry must be unprotected at + * the time of call, and must be pinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 3/22/06 + * + * Modifications: + * + * JRM -- 4/26/06 + * Modified routine to allow it to operate on protected + * entries. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unpin_entry(H5C_t * cache_ptr, + void * thing) +{ + herr_t ret_value = SUCCEED; /* Return value */ + H5C_cache_entry_t * entry_ptr; + + FUNC_ENTER_NOAPI(H5C_unpin_entry, FAIL) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( thing ); + + entry_ptr = (H5C_cache_entry_t *)thing; + + if ( ! ( entry_ptr->is_pinned ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Entry isn't pinned") + } + + if ( ! ( entry_ptr->is_protected ) ) { + + H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL) + } + + entry_ptr->is_pinned = FALSE; + + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_unpin_entry() */ + + +/*------------------------------------------------------------------------- * Function: H5C_unprotect * * Purpose: Undo an H5C_protect() call -- specifically, mark the @@ -5294,6 +5853,10 @@ H5C_stats__reset(H5C_t * cache_ptr) * JRM -- 9/23/05 * Moved the size_changed parameter into flags. * + * JRM -- 3/21/06 + * Unpdated function to pin and unpin entries as directed via + * the new H5C__PIN_ENTRY_FLAG and H5C__UNPIN_ENTRY_FLAG flags. 
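/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * revised H5C_unprotect() below accepts two new flags that let a
 * client pin or unpin an entry as part of the unprotect call.  The
 * flag values mirror the H5Cprivate.h hunk later in this patch; the
 * surrounding toy types are hypothetical stand-ins for the real cache
 * entry.  Pinning an already pinned entry, or unpinning an already
 * unpinned one, is reported as an error, as in the real code.
 */
#include <stdio.h>

#define TOY__PIN_ENTRY_FLAG    0x0010u   /* same values as H5C__PIN_ENTRY_FLAG */
#define TOY__UNPIN_ENTRY_FLAG  0x0020u   /* and H5C__UNPIN_ENTRY_FLAG          */

struct toy_entry { int is_pinned; };

static int toy_unprotect(struct toy_entry *e, unsigned flags)
{
    int pin_entry   = (flags & TOY__PIN_ENTRY_FLAG)   != 0;
    int unpin_entry = (flags & TOY__UNPIN_ENTRY_FLAG) != 0;

    if (pin_entry && unpin_entry)
        return -1;                       /* contradictory request        */

    if (pin_entry) {
        if (e->is_pinned)
            return -1;                   /* "Entry already pinned???"    */
        e->is_pinned = 1;
    } else if (unpin_entry) {
        if (!e->is_pinned)
            return -1;                   /* "Entry already unpinned???"  */
        e->is_pinned = 0;
    }
    return 0;
}

int main(void)
{
    struct toy_entry e = {0};
    printf("pin:   %d\n", toy_unprotect(&e, TOY__PIN_ENTRY_FLAG));    /* 0  */
    printf("repin: %d\n", toy_unprotect(&e, TOY__PIN_ENTRY_FLAG));    /* -1 */
    printf("unpin: %d\n", toy_unprotect(&e, TOY__UNPIN_ENTRY_FLAG));  /* 0  */
    return 0;
}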
+ * *------------------------------------------------------------------------- */ herr_t @@ -5311,6 +5874,8 @@ H5C_unprotect(H5F_t * f, hbool_t dirtied; hbool_t set_flush_marker; hbool_t size_changed; + hbool_t pin_entry; + hbool_t unpin_entry; #ifdef H5_HAVE_PARALLEL hbool_t clear_entry = FALSE; #endif /* H5_HAVE_PARALLEL */ @@ -5324,6 +5889,8 @@ H5C_unprotect(H5F_t * f, dirtied = ( (flags & H5C__DIRTIED_FLAG) != 0 ); set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 ); size_changed = ( (flags & H5C__SIZE_CHANGED_FLAG) != 0 ); + pin_entry = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 ); + unpin_entry = ( (flags & H5C__UNPIN_ENTRY_FLAG) != 0 ); HDassert( cache_ptr ); HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); @@ -5336,6 +5903,7 @@ H5C_unprotect(H5F_t * f, HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) ); HDassert( ( ! size_changed ) || ( dirtied ) ); HDassert( ( ! size_changed ) || ( new_size > 0 ) ); + HDassert( ! ( pin_entry && unpin_entry ) ); entry_ptr = (H5C_cache_entry_t *)thing; @@ -5415,6 +5983,32 @@ H5C_unprotect(H5F_t * f, entry_ptr->size = new_size; } + /* Pin or unpin the entry as requested. */ + if ( pin_entry ) { + + if ( entry_ptr->is_pinned ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \ + "Entry already pinned???") + } + entry_ptr->is_pinned = TRUE; + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) + + } else if ( unpin_entry ) { + + if ( ! ( entry_ptr->is_pinned ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \ + "Entry already unpinned???") + } + entry_ptr->is_pinned = FALSE; + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) + + } + + /* H5C__UPDATE_RP_FOR_UNPROTECT will places the unprotected entry on + * the pinned entry list if entry_ptr->is_pined is TRUE. + */ H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) entry_ptr->is_protected = FALSE; @@ -5451,6 +6045,9 @@ H5C_unprotect(H5F_t * f, */ hbool_t dummy_first_flush = TRUE; + /* we can't delete a pinned entry */ + HDassert ( ! (entry_ptr->is_pinned ) ); + /* verify that the target entry is in the cache. */ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) @@ -6837,6 +7434,314 @@ done: /*------------------------------------------------------------------------- + * Function: H5C_flush_invalidate_cache + * + * Purpose: Flush and destroy the entries contained in the target + * cache. + * + * If the cache contains protected entries, the function will + * fail, as protected entries cannot be either flushed or + * destroyed. However all unprotected entries should be + * flushed and destroyed before the function returns failure. + * + * While pinned entries can usually be flushed, they cannot + * be destroyed. However, they should be unpinned when all + * the entries that reference them have been destroyed (thus + * reduding the pinned entry's reference count to 0, allowing + * it to be unpinned). + * + * If pinned entries are present, the function makes repeated + * passes through the cache, flushing all dirty entries + * (including the pinned dirty entries where permitted) and + * destroying all unpinned entries. This process is repeated + * until either the cache is empty, or the number of pinned + * entries stops decreasing on each pass. + * + * The primary_dxpl_id and secondary_dxpl_id parameters + * specify the dxpl_ids used on the first write occasioned + * by the flush (primary_dxpl_id), and on all subsequent + * writes (secondary_dxpl_id). 
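/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * function below makes repeated flush/destroy passes so that pinned
 * entries get a chance to be unpinned by the destruction of the
 * entries that reference them.  This hypothetical, self-contained
 * model shows only the termination rule: keep passing while progress
 * is being made (the pinned entry count keeps shrinking) and fail if
 * pinned entries remain once progress stops.
 */
static int toy_flush_invalidate(int *pel_len, int (*one_pass)(int *pel_len))
{
    int first_pass  = 1;
    int cur_pel_len = *pel_len;
    int old_pel_len = *pel_len;

    while (first_pass || cur_pel_len < old_pel_len) {
        if (one_pass(pel_len) < 0)       /* flush dirty, destroy unpinned */
            return -1;

        old_pel_len = cur_pel_len;
        cur_pel_len = *pel_len;
        first_pass  = 0;
    }

    /* analogous to "Can't unpin all pinned entries." in the real code */
    return (cur_pel_len > 0) ? -1 : 0;
}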
+ * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. + * + * Programmer: John Mainzer + * 3/24/065 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_flush_invalidate_cache(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + unsigned flags) +{ + herr_t status; + herr_t ret_value = SUCCEED; + hbool_t first_flush = TRUE; + hbool_t first_pass = TRUE; + hbool_t have_pinned_entries; + int32_t protected_entries = 0; + int32_t i; + int32_t cur_pel_len; + int32_t old_pel_len; + unsigned cooked_flags; + H5SL_node_t * node_ptr = NULL; + H5C_cache_entry_t * entry_ptr = NULL; + H5C_cache_entry_t * next_entry_ptr = NULL; +#if H5C_DO_SANITY_CHECKS + int32_t actual_slist_len = 0; + int32_t initial_slist_len = 0; + size_t actual_slist_size = 0; + size_t initial_slist_size = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + + FUNC_ENTER_NOAPI(H5C_flush_invalidate_cache, FAIL) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( cache_ptr->skip_file_checks || f ); + HDassert( cache_ptr->slist_ptr ); + + /* Filter out the flags that are not relevant to the flush/invalidate. + * At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept. + */ + cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; + + /* remove ageout markers if present */ + if ( cache_ptr->epoch_markers_active > 0 ) { + + status = H5C__autoadjust__ageout__remove_all_markers(cache_ptr); + + if ( status != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "error removing all epoch markers.") + } + } + + cur_pel_len = cache_ptr->pel_len; + old_pel_len = cache_ptr->pel_len; + + while ( ( first_pass ) || + ( ( cur_pel_len < old_pel_len ) && ( protected_entries == 0 ) ) ) + { + have_pinned_entries = ( cur_pel_len > 0 ); + + /* first, try to flush-destroy any dirty entries */ + + if ( cache_ptr->slist_len == 0 ) { + + node_ptr = NULL; + HDassert( cache_ptr->slist_size == 0 ); + + } else { + + node_ptr = H5SL_first(cache_ptr->slist_ptr); + +#if H5C_DO_SANITY_CHECKS + /* Depending on circumstances, H5C_flush_single_entry() will + * remove dirty entries from the slist as it flushes them. + * Thus for sanity checks we must make note of the initial + * slist length and size before we do any flushes. + */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; +#endif /* H5C_DO_SANITY_CHECKS */ + + } + + while ( node_ptr != NULL ) + { + /* Note that we now remove nodes from the slist as we flush + * the associated entries, instead of leaving them there + * until we are done, and then destroying all nodes in + * the slist. + * + * While this optimization is still easy if everything works, + * the addition of pinned entries and multiple passes + * through the cache to allow entries to unpin themselves + * complicates error recover greatly. + * + * Given these complications, I've decided to ommit this + * this optimization for now. It can be re-implemented + * later if needed. + */ + + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + + /* increment node pointer now, before we delete its target + * from the slist. 
+ */ + node_ptr = H5SL_next(node_ptr); + + HDassert( entry_ptr != NULL ); + HDassert( entry_ptr->in_slist ); + +#if H5C_DO_SANITY_CHECKS + actual_slist_len++; + actual_slist_size += entry_ptr->size; +#endif /* H5C_DO_SANITY_CHECKS */ + + if ( entry_ptr->is_protected ) { + + /* we have major problems -- but lets flush + * everything we can before we flag an error. + */ + protected_entries++; + + } else if ( entry_ptr->is_pinned ) { + + /* Test to see if we are can flush the entry now. + * If we can, go ahead and flush, but don't tell + * H5C_flush_single_entry() to destroy the entry + * as pinned entries can't be evicted. + */ + if ( TRUE ) { /* insert test here */ /* JRM */ + + + status = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + NULL, + entry_ptr->addr, + H5C__NO_FLAGS_SET, + &first_flush, + TRUE); + if ( status < 0 ) { + + /* This shouldn't happen -- if it does, we are toast + * so just scream and die. + */ + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "dirty pinned entry flush failed.") + } + } + } else { + + status = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + NULL, + entry_ptr->addr, + (cooked_flags | + H5C__FLUSH_INVALIDATE_FLAG), + &first_flush, + TRUE); + if ( status < 0 ) { + + /* This shouldn't happen -- if it does, we are toast so + * just scream and die. + */ + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "dirty entry flush destroy failed.") + } + } + } /* end while loop scanning skip list */ + +#if H5C_DO_SANITY_CHECKS + HDassert( actual_slist_len == initial_slist_len ); + HDassert( actual_slist_size == initial_slist_size ); +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Since we are doing a destroy, we must make a pass through + * the hash table and try to flush - destroy all entries that + * remain. Note that all remaining entries entries must be + * clean, so this will not result in any writes to disk. + */ + for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) + { + next_entry_ptr = cache_ptr->index[i]; + + while ( next_entry_ptr != NULL ) + { + entry_ptr = next_entry_ptr; + next_entry_ptr = entry_ptr->ht_next; + + if ( entry_ptr->is_protected ) { + + /* we have major problems -- but lets flush and destroy + * everything we can before we flag an error. + */ + + if ( ! entry_ptr->in_slist ) { + + protected_entries++; + HDassert( !(entry_ptr->is_dirty) ); + } + } else if ( ! ( entry_ptr->is_pinned ) ) { + + HDassert( !(entry_ptr->is_dirty) ); + HDassert( !(entry_ptr->in_slist) ); + + status = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + NULL, + entry_ptr->addr, + (cooked_flags | + H5C__FLUSH_INVALIDATE_FLAG), + &first_flush, + TRUE); + if ( status < 0 ) { + + /* This shouldn't happen -- if it does, we are toast so + * just scream and die. + */ + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "Clean entry flush destroy failed.") + } + } + /* We can't do anything if the entry is pinned. The + * hope is that the entry will be unpinned as the + * result of destroys of entries that reference it. + * + * We detect this by noting the change in the number + * of pinned entries from pass to pass. If it stops + * shrinking before it hits zero, we scream and die. 
+ */ + } /* end while loop scanning hash table bin */ + } /* end for loop scanning hash table */ + + HDassert( protected_entries == cache_ptr->pl_len ); + + old_pel_len = cur_pel_len; + cur_pel_len = cache_ptr->pel_len; + + first_pass = FALSE; + + } /* main while loop */ + + HDassert( protected_entries <= cache_ptr->pl_len ); + + if ( protected_entries > 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "Cache has protected entries.") + + } else if ( cur_pel_len > 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "Can't unpin all pinned entries.") + + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_flush_invalidate_cache() */ + + +/*------------------------------------------------------------------------- * * Function: H5C_flush_single_entry * diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 9a3cdcd..8d9a5a6 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -233,6 +233,55 @@ * This field is NULL if the list is empty. * * + * For very frequently used entries, the protect/unprotect overhead can + * become burdensome. To avoid this overhead, I have modified the cache + * to allow entries to be "pinned". A pinned entry is similar to a + * protected entry, in the sense that it cannot be evicted, and that + * the entry can be modified at any time. + * + * Pinning an entry has the following implications: + * + * 1) A pinned entry cannot be evicted. Thus unprotected + * pinned entries reside in the pinned entry list, instead + * of the LRU list(s) (or other lists maintained by the current + * replacement policy code). + * + * 2) A pinned entry can be accessed or modified at any time. + * Therefore, the cache must check with the entry owner + * before flushing it. If permission is denied, the + * cache just skips the entry in the flush. + * + * 3) A pinned entry can be marked as dirty (and possibly + * change size) while it is unprotected. + * + * 4) The flush-destroy code must allow pinned entries to + * be unpinned (and possibly unprotected) during the + * flush. + * + * Since pinned entries cannot be evicted, they must be kept on a pinned + * entry list, instead of being entrusted to the replacement policy code. + * + * Maintaining the pinned entry list requires the following fields: + * + * pel_len: Number of entries currently residing on the pinned + * entry list. + * + * pel_size: Number of bytes of cache entries currently residing on + * the pinned entry list. + * + * pel_head_ptr: Pointer to the head of the doubly linked list of pinned + * but not protected entries. Note that cache entries on + * this list are linked by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned + * but not protected entries. Note that cache entries on + * this list are linked by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * * The cache must have a replacement policy, and the fields supporting this * policy must be accessible from this structure. * @@ -504,6 +553,32 @@ * id equal to the array index has been renamed in the current * epoch. * + * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been pinned in the current + * epoch. + * + * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been unpinned in the current + * epoch. 
+ * + * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been marked dirty while pinned + * in the current epoch. + * + * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * cells are used to record the number of times an entry + * with type id equal to the array index has been flushed while + * pinned in the current epoch. + * + * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * cells are used to record the number of times an entry + * with type id equal to the array index has been cleared while + * pinned in the current epoch. + * + * * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. * The cells are used to record the number of times an entry * with type id equal to the array index has increased in @@ -552,6 +627,12 @@ * max_pl_size: Largest value attained by the pl_size field in the * current epoch. * + * max_pel_len: Largest value attained by the pel_len field in the + * current epoch. + * + * max_pel_size: Largest value attained by the pel_size field in the + * current epoch. + * * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS * and H5C_COLLECT_CACHE_ENTRY_STATS are true. * @@ -580,6 +661,11 @@ * with type id equal to the array index that has resided in * the cache in the current epoch. * + * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times that any single + * entry with type id equal to the array index that has been + * marked as pinned in the cache in the current epoch. + * * * Fields supporting testing: * @@ -644,6 +730,11 @@ struct H5C_t H5C_cache_entry_t * pl_head_ptr; H5C_cache_entry_t * pl_tail_ptr; + int32_t pel_len; + size_t pel_size; + H5C_cache_entry_t * pel_head_ptr; + H5C_cache_entry_t * pel_tail_ptr; + int32_t LRU_list_len; size_t LRU_list_size; H5C_cache_entry_t * LRU_head_ptr; @@ -687,6 +778,11 @@ struct H5C_t int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; int64_t renames[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1]; int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1]; int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1]; @@ -703,10 +799,12 @@ struct H5C_t int32_t max_slist_len; size_t max_slist_size; - int32_t max_pl_len; size_t max_pl_size; + int32_t max_pel_len; + size_t max_pel_size; + #if H5C_COLLECT_CACHE_ENTRY_STATS int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; @@ -714,6 +812,7 @@ struct H5C_t int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1]; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index c7d0313..43a93b3 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -243,11 +243,42 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * Note that protected entries are removed from the LRU lists * and inserted on the protected list. * + * is_pinned: Boolean flag indicating whether the entry has been pinned + * in the cache. + * + * For very hot entries, the protect / unprotect overhead + * can become excessive. 
Thus the cache has been extended + * to allow an entry to be "pinned" in the cache. + * + * Pinning an entry in the cache has several implications: + * + * 1) A pinned entry cannot be evicted. Thus unprotected + * pinned entries must be stored in the pinned entry + * list, instead of being managed by the replacement + * policy code (LRU at present). + * + * 2) A pinned entry can be accessed or modified at any time. + * Therefore, the cache must check with the entry owner + * before flushing it. If permission is denied, the + * cache does not flush the entry. + * + * 3) A pinned entry can be marked as dirty (and possibly + * change size) while it is unprotected. + * + * 4) The flush-destroy code must allow pinned entries to + * be unpinned (and possibly unprotected) during the + * flush. + * + * JRM -- 3/16/06 + * * in_slist: Boolean flag indicating whether the entry is in the skip list * As a general rule, entries are placed in the list when they * are marked dirty. However they may remain in the list after * being flushed. * + * Update: Dirty entries are now removed from the skip list + * when they are flushed. + * * flush_marker: Boolean flag indicating that the entry is to be flushed * the next time H5C_flush_cache() is called with the * H5AC__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when @@ -359,6 +390,9 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * flushes: int32_t containing the number of times this cache entry has * been flushed to file in its life time. * + * pins: int32_t containing the number of times this cache entry has + * been pinned in cache in its life time. + * ****************************************************************************/ typedef struct H5C_cache_entry_t @@ -368,6 +402,7 @@ typedef struct H5C_cache_entry_t const H5C_class_t * type; hbool_t is_dirty; hbool_t is_protected; + hbool_t is_pinned; hbool_t in_slist; hbool_t flush_marker; #ifdef H5_HAVE_PARALLEL @@ -393,6 +428,7 @@ typedef struct H5C_cache_entry_t int32_t accesses; int32_t clears; int32_t flushes; + int32_t pins; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ @@ -708,16 +744,18 @@ typedef struct H5C_auto_size_ctl_t /* These flags applies only to H5C_unprotect() */ #define H5C__DIRTIED_FLAG 0x0004 #define H5C__SIZE_CHANGED_FLAG 0x0008 +#define H5C__PIN_ENTRY_FLAG 0x0010 +#define H5C__UNPIN_ENTRY_FLAG 0x0020 /* These flags apply to H5C_flush_cache() & H5C_flush_single_entry() */ -#define H5C__FLUSH_INVALIDATE_FLAG 0x0010 -#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0020 -#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0040 +#define H5C__FLUSH_INVALIDATE_FLAG 0x0040 +#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0080 +#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0100 /* This flag applies to H5C_flush_cache() only. 
It is an error to use * it in combination with the H5C__FLUSH_INVALIDATE_FLAG */ -#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0080 +#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0200 H5_DLL H5C_t * H5C_create(size_t max_cache_size, @@ -773,7 +811,8 @@ H5_DLL herr_t H5C_get_entry_status(H5C_t * cache_ptr, size_t * size_ptr, hbool_t * in_cache_ptr, hbool_t * is_dirty_ptr, - hbool_t * is_protected_ptr); + hbool_t * is_protected_ptr, + hbool_t * is_pinned_ptr); H5_DLL herr_t H5C_insert_entry(H5F_t * f, hid_t primary_dxpl_id, @@ -791,11 +830,19 @@ H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t * f, int32_t ce_array_len, haddr_t * ce_array_ptr); +H5_DLL herr_t H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr, + void * thing, + hbool_t size_changed, + size_t new_size); + H5_DLL herr_t H5C_rename_entry(H5C_t * cache_ptr, const H5C_class_t * type, haddr_t old_addr, haddr_t new_addr); +H5_DLL herr_t H5C_pin_protected_entry(H5C_t * cache_ptr, + void * thing); + H5_DLL void * H5C_protect(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, @@ -822,6 +869,8 @@ H5_DLL herr_t H5C_stats(H5C_t * cache_ptr, H5_DLL void H5C_stats__reset(H5C_t * cache_ptr); +H5_DLL herr_t H5C_unpin_entry(H5C_t * cache_ptr, void * thing); + H5_DLL herr_t H5C_unprotect(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, diff --git a/src/H5Edefin.h b/src/H5Edefin.h index 1bdd4ba..75dc2ce 100644 --- a/src/H5Edefin.h +++ b/src/H5Edefin.h @@ -144,6 +144,9 @@ hid_t H5E_CANTINS_g = FAIL; /* Unable to insert metadata into cache hid_t H5E_CANTRENAME_g = FAIL; /* Unable to rename metadata */ hid_t H5E_CANTPROTECT_g = FAIL; /* Unable to protect metadata */ hid_t H5E_CANTUNPROTECT_g = FAIL; /* Unable to unprotect metadata */ +hid_t H5E_CANTPIN_g = FAIL; /* Unable to pin cache entry */ +hid_t H5E_CANTUNPIN_g = FAIL; /* Unable to un-pin cache entry */ +hid_t H5E_CANTMARKDIRTY_g = FAIL; /* Unable to mark a pinned entry as dirty */ hid_t H5E_CANTDIRTY_g = FAIL; /* Unable to mark metadata as dirty */ /* Parallel MPI errors */ diff --git a/src/H5Einit.h b/src/H5Einit.h index c35cab5..ee1a5cf 100644 --- a/src/H5Einit.h +++ b/src/H5Einit.h @@ -534,6 +534,21 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to unprotect metadata"))==NULL) HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed") if((H5E_CANTUNPROTECT_g = H5I_register(H5I_ERROR_MSG, msg))<0) HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message") +assert(H5E_CANTPIN_g==(-1)); +if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to pin cache entry"))==NULL) + HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed") +if((H5E_CANTPIN_g = H5I_register(H5I_ERROR_MSG, msg))<0) + HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message") +assert(H5E_CANTUNPIN_g==(-1)); +if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to un-pin cache entry"))==NULL) + HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed") +if((H5E_CANTUNPIN_g = H5I_register(H5I_ERROR_MSG, msg))<0) + HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message") +assert(H5E_CANTMARKDIRTY_g==(-1)); +if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to mark a pinned entry as dirty"))==NULL) + HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed") +if((H5E_CANTMARKDIRTY_g = H5I_register(H5I_ERROR_MSG, msg))<0) + HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message") assert(H5E_CANTDIRTY_g==(-1)); if((msg = H5E_create_msg(cls, H5E_MINOR, 
"Unable to mark metadata as dirty"))==NULL) HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed") diff --git a/src/H5Epubgen.h b/src/H5Epubgen.h index 7d9e284..a05e5ed 100644 --- a/src/H5Epubgen.h +++ b/src/H5Epubgen.h @@ -235,6 +235,9 @@ H5_DLLVAR hid_t H5E_NOIDS_g; /* Out of IDs for group */ #define H5E_CANTRENAME (H5OPEN H5E_CANTRENAME_g) #define H5E_CANTPROTECT (H5OPEN H5E_CANTPROTECT_g) #define H5E_CANTUNPROTECT (H5OPEN H5E_CANTUNPROTECT_g) +#define H5E_CANTPIN (H5OPEN H5E_CANTPIN_g) +#define H5E_CANTUNPIN (H5OPEN H5E_CANTUNPIN_g) +#define H5E_CANTMARKDIRTY (H5OPEN H5E_CANTMARKDIRTY_g) #define H5E_CANTDIRTY (H5OPEN H5E_CANTDIRTY_g) H5_DLLVAR hid_t H5E_CANTFLUSH_g; /* Unable to flush data from cache */ H5_DLLVAR hid_t H5E_CANTSERIALIZE_g; /* Unable to serialize data from cache */ @@ -246,6 +249,9 @@ H5_DLLVAR hid_t H5E_CANTINS_g; /* Unable to insert metadata into cache */ H5_DLLVAR hid_t H5E_CANTRENAME_g; /* Unable to rename metadata */ H5_DLLVAR hid_t H5E_CANTPROTECT_g; /* Unable to protect metadata */ H5_DLLVAR hid_t H5E_CANTUNPROTECT_g; /* Unable to unprotect metadata */ +H5_DLLVAR hid_t H5E_CANTPIN_g; /* Unable to pin cache entry */ +H5_DLLVAR hid_t H5E_CANTUNPIN_g; /* Unable to un-pin cache entry */ +H5_DLLVAR hid_t H5E_CANTMARKDIRTY_g; /* Unable to mark a pinned entry as dirty */ H5_DLLVAR hid_t H5E_CANTDIRTY_g; /* Unable to mark metadata as dirty */ /* Parallel MPI errors */ diff --git a/src/H5Eterm.h b/src/H5Eterm.h index d7afed8..f2968f1 100644 --- a/src/H5Eterm.h +++ b/src/H5Eterm.h @@ -146,6 +146,9 @@ H5E_CANTINS_g= H5E_CANTRENAME_g= H5E_CANTPROTECT_g= H5E_CANTUNPROTECT_g= +H5E_CANTPIN_g= +H5E_CANTUNPIN_g= +H5E_CANTMARKDIRTY_g= H5E_CANTDIRTY_g= /* Parallel MPI errors */ diff --git a/src/H5err.txt b/src/H5err.txt index bd3e06a..014e7a7 100644 --- a/src/H5err.txt +++ b/src/H5err.txt @@ -155,6 +155,9 @@ MINOR, CACHE, H5E_CANTINS, Unable to insert metadata into cache MINOR, CACHE, H5E_CANTRENAME, Unable to rename metadata MINOR, CACHE, H5E_CANTPROTECT, Unable to protect metadata MINOR, CACHE, H5E_CANTUNPROTECT, Unable to unprotect metadata +MINOR, CACHE, H5E_CANTPIN, Unable to pin cache entry +MINOR, CACHE, H5E_CANTUNPIN, Unable to un-pin cache entry +MINOR, CACHE, H5E_CANTMARKDIRTY, Unable to mark a pinned entry as dirty MINOR, CACHE, H5E_CANTDIRTY, Unable to mark metadata as dirty # B-tree related errors diff --git a/test/cache.c b/test/cache.c index 58b2af5..3157435 100644 --- a/test/cache.c +++ b/test/cache.c @@ -42,6 +42,11 @@ static void check_flush_cache__multi_entry_test(H5C_t * cache_ptr, unsigned int flush_flags, int spec_size, struct flush_cache_test_spec spec[]); +static void check_flush_cache__pe_multi_entry_test(H5C_t * cache_ptr, + int test_num, + unsigned int flush_flags, + int spec_size, + struct pe_flush_cache_test_spec spec[]); static void check_flush_cache__single_entry(H5C_t * cache_ptr); static void check_flush_cache__single_entry_test(H5C_t * cache_ptr, int test_num, @@ -55,17 +60,40 @@ static void check_flush_cache__single_entry_test(H5C_t * cache_ptr, hbool_t expected_cleared, hbool_t expected_flushed, hbool_t expected_destroyed); +static void check_flush_cache__pinned_single_entry_test(H5C_t * cache_ptr, + int test_num, + int entry_type, + int entry_idx, + hbool_t dirty_flag, + hbool_t mark_dirty, + hbool_t unprotect_unpin, + unsigned int flags, + unsigned int flush_flags, + hbool_t expected_cleared, + hbool_t expected_flushed, + hbool_t expected_destroyed); static void check_flush_protected_err(void); +static void 
check_rename_entry(void); +static void check_rename_entry__run_test(H5C_t * cache_ptr, int test_num, + struct rename_entry_test_spec * spec_ptr); +static void check_pin_protected_entry(void); +static void check_destroy_pinned_err(void); static void check_destroy_protected_err(void); static void check_duplicate_insert_err(void); static void check_rename_err(void); +static void check_double_pin_err(void); +static void check_double_unpin_err(void); +static void check_pin_entry_errs(void); +static void check_pin_protected_entry(void); static void check_double_protect_err(void); static void check_double_unprotect_err(void); +static void check_mark_pinned_entry_dirty_errs(void); static void check_auto_cache_resize(void); static void check_auto_cache_resize_disable(void); static void check_auto_cache_resize_epoch_markers(void); static void check_auto_cache_resize_input_errs(void); static void check_auto_cache_resize_aux_fcns(void); +static void check_get_entry_status(void); /**************************************************************************/ @@ -253,9 +281,13 @@ smoke_check_1(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_1() */ @@ -440,9 +472,13 @@ smoke_check_2(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_2() */ @@ -626,9 +662,13 @@ smoke_check_3(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_3() */ @@ -813,9 +853,13 @@ smoke_check_4(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_4() */ @@ -1044,9 +1088,13 @@ smoke_check_5(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_5() */ @@ -1275,9 +1323,13 @@ smoke_check_6(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_6() */ @@ -1507,9 +1559,13 @@ smoke_check_7(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_7() */ @@ -1739,9 +1795,13 @@ smoke_check_8(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* smoke_check_8() */ @@ -1915,9 +1975,11 @@ write_permitted_check(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -1927,6 +1989,8 @@ write_permitted_check(void) #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + return; + } /* write_permitted_check() */ @@ -1999,9 +2063,13 @@ check_flush_cache(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! 
pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_flush_cache() */ @@ -2109,6 +2177,9 @@ check_flush_cache__empty_cache(H5C_t * cache_ptr) * * Modifications: * + * JRM -- 4/5/06 + * Added pinned entry tests. + * *------------------------------------------------------------------------- */ @@ -3127,136 +3198,1106 @@ check_flush_cache__multi_entry(H5C_t * cache_ptr) flush_flags, spec_size, spec); } -} /* check_flush_cache__multi_entry() */ - - -/*------------------------------------------------------------------------- - * Function: check_flush_cache__multi_entry_test() - * - * Purpose: Run a multi entry flush cache test. - * - * Return: void - * - * Programmer: John Mainzer - * 1/13/05 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ - -static void -check_flush_cache__multi_entry_test(H5C_t * cache_ptr, - int test_num, - unsigned int flush_flags, - int spec_size, - struct flush_cache_test_spec spec[]) -{ - /* const char * fcn_name = "check_flush_cache__multi_entry_test"; */ - static char msg[128]; - herr_t result; - int i; - size_t total_entry_size = 0; - test_entry_t * base_addr; - test_entry_t * entry_ptr; - - if ( cache_ptr == NULL ) { - - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "cache_ptr NULL on entry to single entry test #%d.", - test_num); - failure_mssg = msg; - } - else if ( ( cache_ptr->index_len != 0 ) || - ( cache_ptr->index_size != 0 ) ) { - - pass = FALSE; - - HDsnprintf(msg, (size_t)128, - "cache not empty at beginning of multi entry test #%d.", - test_num); - failure_mssg = msg; - } - else if ( ( spec_size < 1 ) || ( spec == NULL ) ) { - - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "missing/bad test spec on entry to multi entry test #%d.", - test_num); - failure_mssg = msg; - } - - i = 0; - while ( ( pass ) && ( i < spec_size ) ) - { - if ( ( spec[i].entry_num != i ) || - ( spec[i].entry_type < 0 ) || - ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) || - ( spec[i].entry_index < 0 ) || - ( spec[i].entry_index > max_indices[spec[i].entry_type] ) ) { - - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "bad data in spec[%d] on entry to multi entry test #%d.", - i, test_num); - failure_mssg = msg; - } - i++; - } + /* Now do pinned entry tests: + * + * For the most part, this test is directed at testing the ability + * of the flush routine to unravel collections of pinned entries. 
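/*
 * For reference (editor's sketch): struct pe_flush_cache_test_spec is
 * declared in test/cache_common.h, which this patch also modifies but
 * which is not reproduced in this hunk.  Judging from the initializers
 * below it presumably has roughly the following shape -- the field types
 * are inferred, and MAX_PINS appears to be 8 given the eight-element pin
 * arrays.
 */
struct pe_flush_cache_test_spec
{
    int             entry_num;
    int             entry_type;
    int             entry_index;
    hbool_t         insert_flag;
    hbool_t         dirty_flag;
    unsigned int    flags;
    int             num_pins;
    int             pin_type[MAX_PINS];
    int             pin_idx[MAX_PINS];
    hbool_t         expected_loaded;
    hbool_t         expected_cleared;
    hbool_t         expected_flushed;
    hbool_t         expected_destroyed;
};
/* During test setup, for each j < num_pins the test harness makes the
 * (entry_type, entry_index) entry pin the (pin_type[j], pin_idx[j])
 * entry via create_pinned_entry_dependency() -- see the setup loop in
 * check_flush_cache__pe_multi_entry_test() below.
 */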
+ */ - i = 0; - while ( ( pass ) && ( i < spec_size ) ) + if ( pass ) { - if ( spec[i].insert_flag ) { - - insert_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, - spec[i].dirty_flag, spec[i].flags); - - } else { - - protect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index); - - unprotect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, - (int)(spec[i].dirty_flag), spec[i].flags); - } - - total_entry_size += entry_sizes[spec[i].entry_type]; + int test_num = 1; + unsigned int flush_flags = H5C__NO_FLAGS_SET; + int spec_size = 8; + struct pe_flush_cache_test_spec spec[8] = + { + { + /* entry_num = */ 0, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 100, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 1, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 75, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 2, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 25, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 2, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 3, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 50, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 3, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 4, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 10, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 4, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 5, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 20, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 5, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, -1, -1, -1}, + /* 
expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 6, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 30, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 6, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + }, + { + /* entry_num = */ 7, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 40, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 7, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, 30, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ FALSE + } + }; - i++; + check_flush_cache__pe_multi_entry_test(cache_ptr, test_num, + flush_flags, spec_size, spec); } - if ( pass ) { - - result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); - - if ( result < 0 ) { - - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "flush with flags 0x%x failed in multi entry test #%d.", - flush_flags, test_num); - failure_mssg = msg; - } - } - i = 0; - while ( ( pass ) && ( i < spec_size ) ) + if ( pass ) { - base_addr = entries[spec[i].entry_type]; - entry_ptr = &(base_addr[spec[i].entry_index]); - - if ( ( entry_ptr->loaded != spec[i].expected_loaded ) || - ( entry_ptr->cleared != spec[i].expected_cleared ) || - ( entry_ptr->flushed != spec[i].expected_flushed ) || - ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) { - -#if 0 /* This is useful debugging code. Lets keep it around. 
*/ - - HDfprintf(stdout, - "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->loaded), - (int)(spec[i].expected_loaded), + int test_num = 2; + unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG; + int spec_size = 8; + struct pe_flush_cache_test_spec spec[8] = + { + { + /* entry_num = */ 0, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 100, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 1, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 75, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 2, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 25, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 2, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 3, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 50, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 3, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 4, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 10, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 5, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 20, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {MONSTER_ENTRY_TYPE, + -1, -1, -1, -1 -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {10, -1, -1, -1 -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 6, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 30, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 2, + /* pin_type[MAX_PINS] = */ {MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + -1, -1, -1, -1, -1, 
-1}, + /* pin_idx[MAX_PINS] = */ {10, 20, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 7, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 40, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 3, + /* pin_type[MAX_PINS] = */ {MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + MONSTER_ENTRY_TYPE, + -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {10, 20, 30, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + } + }; + + check_flush_cache__pe_multi_entry_test(cache_ptr, test_num, + flush_flags, spec_size, spec); + } + + if ( pass ) + { + int test_num = 3; + unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG; + int spec_size = 8; + struct pe_flush_cache_test_spec spec[8] = + { + { + /* entry_num = */ 0, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 100, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 1, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 75, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 2, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 25, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 3, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 50, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 4, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 10, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 5, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 20, + /* insert_flag = 
*/ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 6, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 30, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 7, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 40, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + } + }; + + check_flush_cache__pe_multi_entry_test(cache_ptr, test_num, + flush_flags, spec_size, spec); + } + + + if ( pass ) + { + int test_num = 4; + unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG; + int spec_size = 8; + struct pe_flush_cache_test_spec spec[8] = + { + { + /* entry_num = */ 0, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 100, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 1, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 75, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 2, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 25, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 3, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 50, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + 
{ + /* entry_num = */ 4, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 10, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 5, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 20, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 4, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 6, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 30, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 4, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + PICO_ENTRY_TYPE, + -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 7, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 40, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ FALSE, + /* expected_flushed = */ TRUE, + /* expected_destroyed = */ TRUE + } + }; + + check_flush_cache__pe_multi_entry_test(cache_ptr, test_num, + flush_flags, spec_size, spec); + } + + + if ( pass ) + { + int test_num = 5; + unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG; + int spec_size = 8; + struct pe_flush_cache_test_spec spec[8] = + { + { + /* entry_num = */ 0, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 100, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 0, + /* pin_type[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {-1, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 1, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 75, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 2, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 25, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* 
pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 3, + /* entry_type = */ PICO_ENTRY_TYPE, + /* entry_index = */ 50, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__NO_FLAGS_SET, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 4, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 10, + /* insert_flag = */ FALSE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 5, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 20, + /* insert_flag = */ FALSE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ TRUE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 6, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 30, + /* insert_flag = */ TRUE, + /* dirty_flag = */ FALSE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + }, + { + /* entry_num = */ 7, + /* entry_type = */ MONSTER_ENTRY_TYPE, + /* entry_index = */ 40, + /* insert_flag = */ TRUE, + /* dirty_flag = */ TRUE, + /* flags = */ H5C__SET_FLUSH_MARKER_FLAG, + /* num_pins = */ 1, + /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, + -1, -1, -1, -1, -1, -1, -1}, + /* pin_idx[MAX_PINS] = */ {100, -1, -1, -1, -1, -1, -1, -1}, + /* expected_loaded = */ FALSE, + /* expected_cleared = */ TRUE, + /* expected_flushed = */ FALSE, + /* expected_destroyed = */ TRUE + } + }; + + check_flush_cache__pe_multi_entry_test(cache_ptr, test_num, + flush_flags, spec_size, spec); + } + + return; + +} /* check_flush_cache__multi_entry() */ + + +/*------------------------------------------------------------------------- + * Function: check_flush_cache__multi_entry_test() + * + * Purpose: Run a multi entry flush cache test. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 1/13/05 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_flush_cache__multi_entry_test(H5C_t * cache_ptr, + int test_num, + unsigned int flush_flags, + int spec_size, + struct flush_cache_test_spec spec[]) +{ + /* const char * fcn_name = "check_flush_cache__multi_entry_test"; */ + static char msg[128]; + herr_t result; + int i; + size_t total_entry_size = 0; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + if ( cache_ptr == NULL ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache_ptr NULL on entry to single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + + HDsnprintf(msg, (size_t)128, + "cache not empty at beginning of multi entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( spec_size < 1 ) || ( spec == NULL ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "missing/bad test spec on entry to multi entry test #%d.", + test_num); + failure_mssg = msg; + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + if ( ( spec[i].entry_num != i ) || + ( spec[i].entry_type < 0 ) || + ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) || + ( spec[i].entry_index < 0 ) || + ( spec[i].entry_index > max_indices[spec[i].entry_type] ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "bad data in spec[%d] on entry to multi entry test #%d.", + i, test_num); + failure_mssg = msg; + } + i++; + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + if ( spec[i].insert_flag ) { + + insert_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, + spec[i].dirty_flag, spec[i].flags); + + } else { + + protect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index); + + unprotect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, + (int)(spec[i].dirty_flag), spec[i].flags); + } + + total_entry_size += entry_sizes[spec[i].entry_type]; + + i++; + } + + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "flush with flags 0x%x failed in multi entry test #%d.", + flush_flags, test_num); + failure_mssg = msg; + } + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + base_addr = entries[spec[i].entry_type]; + entry_ptr = &(base_addr[spec[i].entry_index]); + + if ( ( entry_ptr->loaded != spec[i].expected_loaded ) || + ( entry_ptr->cleared != spec[i].expected_cleared ) || + ( entry_ptr->flushed != spec[i].expected_flushed ) || + ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) { + +#if 0 /* This is useful debugging code. Lets keep it around. 
*/ + + HDfprintf(stdout, + "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", + (int)(entry_ptr->loaded), + (int)(spec[i].expected_loaded), + (int)(entry_ptr->cleared), + (int)(spec[i].expected_cleared), + (int)(entry_ptr->flushed), + (int)(spec[i].expected_flushed), + (int)(entry_ptr->destroyed), + (int)(spec[i].expected_destroyed)); + +#endif + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Bad status on entry %d after flush in multi entry test #%d.", + i, test_num); + failure_mssg = msg; + } + i++; + } + + if ( pass ) { + + if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) + && + ( ( cache_ptr->index_len != spec_size ) + || + ( cache_ptr->index_size != total_entry_size ) + ) + ) + || + ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) + && + ( ( cache_ptr->index_len != 0 ) + || + ( cache_ptr->index_size != 0 ) + ) + ) + ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after flush in multi entry test #%d.", + test_num); + failure_mssg = msg; + } + } + + /* clean up the cache to prep for the next test */ + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, + H5C__FLUSH_INVALIDATE_FLAG); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Flush failed on cleanup in multi entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after cleanup in multi entry test #%d.", + test_num); + failure_mssg = msg; + + } + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + base_addr = entries[spec[i].entry_type]; + entry_ptr = &(base_addr[spec[i].entry_index]); + + entry_ptr->loaded = FALSE; + entry_ptr->cleared = FALSE; + entry_ptr->flushed = FALSE; + entry_ptr->destroyed = FALSE; + + i++; + } + + return; + +} /* check_flush_cache__multi_entry_test() */ + + +/*------------------------------------------------------------------------- + * Function: check_flush_cache__pe_multi_entry_test() + * + * Purpose: Run a multi entry flush cache test. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 4/5/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_flush_cache__pe_multi_entry_test(H5C_t * cache_ptr, + int test_num, + unsigned int flush_flags, + int spec_size, + struct pe_flush_cache_test_spec spec[]) +{ + /* const char * fcn_name = "check_flush_cache__pe_multi_entry_test"; */ + static char msg[128]; + herr_t result; + int i; + int j; + size_t total_entry_size = 0; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + if ( cache_ptr == NULL ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache_ptr NULL on entry to pe multi entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + + HDsnprintf(msg, (size_t)128, + "cache not empty at beginning of pe multi entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( spec_size < 1 ) || ( spec == NULL ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "missing/bad test spec on entry to pe multi entry test #%d.", + test_num); + failure_mssg = msg; + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + if ( ( spec[i].entry_num != i ) || + ( spec[i].entry_type < 0 ) || + ( spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES ) || + ( spec[i].entry_index < 0 ) || + ( spec[i].entry_index > max_indices[spec[i].entry_type] ) || + ( spec[i].num_pins < 0 ) || + ( spec[i].num_pins > MAX_PINS ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "bad data in spec[%d] on entry to pe multi entry test #%d.", + i, test_num); + failure_mssg = msg; + } + i++; + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + if ( spec[i].insert_flag ) { + + insert_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, + spec[i].dirty_flag, spec[i].flags); + + } else { + + protect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index); + + unprotect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index, + (int)(spec[i].dirty_flag), spec[i].flags); + } + + total_entry_size += entry_sizes[spec[i].entry_type]; + + for ( j = 0; j < spec[i].num_pins; j++ ) + { + create_pinned_entry_dependency(cache_ptr, + spec[i].entry_type, + spec[i].entry_index, + spec[i].pin_type[j], + spec[i].pin_idx[j]); + } + + i++; + } + + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "flush with flags 0x%x failed in pe multi entry test #%d.", + flush_flags, test_num); + failure_mssg = msg; + } + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + base_addr = entries[spec[i].entry_type]; + entry_ptr = &(base_addr[spec[i].entry_index]); + + if ( ( entry_ptr->loaded != spec[i].expected_loaded ) || + ( entry_ptr->cleared != spec[i].expected_cleared ) || + ( entry_ptr->flushed != spec[i].expected_flushed ) || + ( entry_ptr->destroyed != spec[i].expected_destroyed ) ) { + +#if 0 /* This is useful debugging code. Lets keep it around. 
*/ + + HDfprintf(stdout, + "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", + (int)(entry_ptr->loaded), + (int)(spec[i].expected_loaded), (int)(entry_ptr->cleared), (int)(spec[i].expected_cleared), (int)(entry_ptr->flushed), @@ -3264,152 +4305,1696 @@ check_flush_cache__multi_entry_test(H5C_t * cache_ptr, (int)(entry_ptr->destroyed), (int)(spec[i].expected_destroyed)); -#endif +#endif + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Bad status on entry %d after flush in pe multi entry test #%d.", + i, test_num); + failure_mssg = msg; + } + i++; + } + + if ( pass ) { + + if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) + && + ( ( cache_ptr->index_len != spec_size ) + || + ( cache_ptr->index_size != total_entry_size ) + ) + ) + || + ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) + && + ( ( cache_ptr->index_len != 0 ) + || + ( cache_ptr->index_size != 0 ) + ) + ) + ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after flush in pe multi entry test #%d.", + test_num); + failure_mssg = msg; + } + } + + /* clean up the cache to prep for the next test */ + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, + H5C__FLUSH_INVALIDATE_FLAG); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Flush failed on cleanup in pe multi entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after cleanup in pe multi entry test #%d.", + test_num); + failure_mssg = msg; + + } + } + + i = 0; + while ( ( pass ) && ( i < spec_size ) ) + { + base_addr = entries[spec[i].entry_type]; + entry_ptr = &(base_addr[spec[i].entry_index]); + + entry_ptr->loaded = FALSE; + entry_ptr->cleared = FALSE; + entry_ptr->flushed = FALSE; + entry_ptr->destroyed = FALSE; + + i++; + } + + return; + +} /* check_flush_cache__pe_multi_entry_test() */ + + +/*------------------------------------------------------------------------- + * Function: check_flush_cache__single_entry() + * + * Purpose: Verify that flush_cache behaves as expected when the cache + * contains only one element. + * + * Return: void + * + * Programmer: John Mainzer + * 1/12/05 + * + * Modifications: + * + * JRM -- 3/29/06 + * Added tests for pinned entries. 
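/*
 * Editor's sketch: the pinned-entry tests referred to above are driven by
 * check_flush_cache__pinned_single_entry_test(), whose prototype appears
 * near the top of this file.  The call below is purely illustrative --
 * the parameter values and expected_* results are hypothetical, not
 * copied from the actual test matrix.  It describes a dirty pico entry
 * that is marked dirty again while pinned and then unpinned via the
 * H5C__UNPIN_ENTRY_FLAG on the final unprotect, after which a flush with
 * no flags should write it out without clearing or destroying it.
 */
check_flush_cache__pinned_single_entry_test
(
    /* cache_ptr           */ cache_ptr,
    /* test_num            */ 1,
    /* entry_type          */ PICO_ENTRY_TYPE,
    /* entry_idx           */ 0,
    /* dirty_flag          */ TRUE,
    /* mark_dirty          */ TRUE,
    /* unprotect_unpin     */ TRUE,
    /* flags               */ H5C__NO_FLAGS_SET,
    /* flush_flags         */ H5C__NO_FLAGS_SET,
    /* expected_cleared    */ FALSE,
    /* expected_flushed    */ TRUE,
    /* expected_destroyed  */ FALSE
);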
+ * + *------------------------------------------------------------------------- + */ + +static void +check_flush_cache__single_entry(H5C_t * cache_ptr) +{ + /* const char * fcn_name = "check_flush_cache__single_entry"; */ + + if ( cache_ptr == NULL ) { + + pass = FALSE; + failure_mssg = "cache_ptr NULL on entry to single entry case."; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + failure_mssg = "cache not empty at beginning of single entry case."; + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 1, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 2, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 3, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 4, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 5, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 6, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 7, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ 
FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 8, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 9, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 10, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 11, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 12, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 13, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 14, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 15, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* 
flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 16, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 17, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 18, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 19, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 20, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 21, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 22, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 23, + /* entry_type */ 
PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 24, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 25, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 26, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 27, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 28, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 29, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 30, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ TRUE, + 
/* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 31, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 32, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ FALSE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ TRUE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 33, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 34, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 35, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 36, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 37, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 38, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, 
+ /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 39, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 40, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 41, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 42, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 43, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 44, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 45, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 46, + /* 
entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 47, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 48, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 49, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 50, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 51, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 52, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 53, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* 
expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 54, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 55, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 56, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 57, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 58, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 59, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 60, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 61, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ 
H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 62, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_INVALIDATE_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 63, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + if ( pass ) { + + check_flush_cache__single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 64, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* insert_flag */ TRUE, + /* dirty_flag */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_loaded */ FALSE, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ TRUE + ); + } + + /* Now run single entry tests for pinned entries. Test all combinations + * of: + * + * 1) Unpin by unprotect vs. unpin by call to H5C_unpin_entry(). + * + * 2) Marked dirty by unprotect or not. + * + * 3) Marked dirty by call to H5C_mark_pinned_entry_dirty() or not. + * + * 4) Entry marked for flush or not. + * + * 5) Call flush with H5C__FLUSH_MARKED_ENTRIES_FLAG or not. + * + * 6) Call flush with H5C__FLUSH_CLEAR_ONLY_FLAG or not. + * + * This yields a total of 64 tests. 
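+ *
+ *    The expected outcomes for each of the 64 combinations are worked
+ *    out by hand in the calls that follow.  Purely as an illustration
+ *    (not part of this patch), the same matrix could be enumerated by
+ *    decoding the six booleans from a single loop index; the expected
+ *    cleared / flushed / destroyed values would still have to be
+ *    supplied per case, which is why the calls are written out in
+ *    full below:
+ *
+ *        for ( i = 0; i < 64; i++ ) {
+ *            hbool_t dirty_flag      = ( (i & 0x01) != 0 );
+ *            hbool_t mark_dirty      = ( (i & 0x02) != 0 );
+ *            hbool_t unprotect_unpin = ( (i & 0x04) != 0 );
+ *            unsigned flags          = ( (i & 0x08) != 0 ) ?
+ *                H5C__SET_FLUSH_MARKER_FLAG : H5C__NO_FLAGS_SET;
+ *            unsigned flush_flags    =
+ *                ( ( (i & 0x10) != 0 ) ? H5C__FLUSH_MARKED_ENTRIES_FLAG : 0 ) |
+ *                ( ( (i & 0x20) != 0 ) ? H5C__FLUSH_CLEAR_ONLY_FLAG : 0 );
+ *            ... hand-computed expectations and the test call go here ...
+ *        }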
+ */ + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 1, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Bad status on entry %d after flush in multi entry test #%d.", - i, test_num); - failure_mssg = msg; - } - i++; + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 2, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); } if ( pass ) { - if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) - && - ( ( cache_ptr->index_len != spec_size ) - || - ( cache_ptr->index_size != total_entry_size ) - ) - ) - || - ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) - && - ( ( cache_ptr->index_len != 0 ) - || - ( cache_ptr->index_size != 0 ) - ) - ) - ) { + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 3, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Unexpected cache len/size after flush in multi entry test #%d.", - test_num); - failure_mssg = msg; - } + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 4, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); } - /* clean up the cache to prep for the next test */ if ( pass ) { - result = H5C_flush_cache(NULL, -1, -1, cache_ptr, - H5C__FLUSH_INVALIDATE_FLAG); + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 5, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } - if ( result < 0 ) { + if ( pass ) { - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Flush failed on cleanup in multi entry test #%d.", - test_num); - failure_mssg = msg; - } - else if ( ( cache_ptr->index_len != 0 ) || - ( cache_ptr->index_size != 0 ) ) { + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 6, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed 
*/ TRUE, + /* expected_destroyed */ FALSE + ); + } - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Unexpected cache len/size after cleanup in multi entry test #%d.", - test_num); - failure_mssg = msg; + if ( pass ) { - } + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 7, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); } - i = 0; - while ( ( pass ) && ( i < spec_size ) ) - { - base_addr = entries[spec[i].entry_type]; - entry_ptr = &(base_addr[spec[i].entry_index]); + if ( pass ) { - entry_ptr->loaded = FALSE; - entry_ptr->cleared = FALSE; - entry_ptr->flushed = FALSE; - entry_ptr->destroyed = FALSE; + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 8, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } - i++; + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 9, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); } -} /* check_flush_cache__multi_entry_test() */ + if ( pass ) { - -/*------------------------------------------------------------------------- - * Function: check_flush_cache__single_entry() - * - * Purpose: Verify that flush_cache behaves as expected when the cache - * contains only one element. 
- * - * Return: void - * - * Programmer: John Mainzer - * 1/12/05 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 10, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } -static void -check_flush_cache__single_entry(H5C_t * cache_ptr) -{ - /* const char * fcn_name = "check_flush_cache__single_entry"; */ + if ( pass ) { - if ( cache_ptr == NULL ) { + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 11, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } - pass = FALSE; - failure_mssg = "cache_ptr NULL on entry to single entry case."; + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 12, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); } - else if ( ( cache_ptr->index_len != 0 ) || - ( cache_ptr->index_size != 0 ) ) { - pass = FALSE; - failure_mssg = "cache not empty at beginning of single entry case."; + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 13, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 14, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__NO_FLAGS_SET, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 1, + /* test_num */ 15, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ TRUE, /* expected_cleared */ FALSE, - /* expected_flushed */ FALSE, + /* expected_flushed */ TRUE, /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 
2, + /* test_num */ 16, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ TRUE, /* expected_cleared */ FALSE, /* expected_flushed */ TRUE, /* expected_destroyed */ FALSE @@ -3418,17 +6003,17 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 3, + /* test_num */ 17, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3437,18 +6022,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 4, + /* test_num */ 18, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -3456,55 +6041,55 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 5, + /* test_num */ 19, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 6, + /* test_num */ 20, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 7, + /* test_num */ 21, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ 
H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3513,17 +6098,17 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 8, + /* test_num */ 22, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3532,58 +6117,55 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 9, + /* test_num */ 23, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 10, + /* test_num */ 24, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 11, + /* test_num */ 25, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3592,18 +6174,17 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 12, + /* test_num */ 26, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ 
H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3612,100 +6193,208 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 13, + /* test_num */ 27, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 28, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ TRUE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 29, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, /* can't mark a clean entry */ + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 30, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, /* can't makr a clean entry */ + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 31, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 14, + /* test_num */ 32, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, /* expected_cleared */ FALSE, /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - 
check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 15, + /* test_num */ 33, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 34, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE + ); + } + + if ( pass ) { + + check_flush_cache__pinned_single_entry_test + ( + /* cache_ptr */ cache_ptr, + /* test_num */ 35, + /* entry_type */ PICO_ENTRY_TYPE, + /* entry_idx */ 0, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 16, + /* test_num */ 36, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 17, + /* test_num */ 37, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -3713,37 +6402,37 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 18, + /* test_num */ 38, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ 
H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 19, + /* test_num */ 39, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -3751,17 +6440,17 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 20, + /* test_num */ 40, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3770,56 +6459,56 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 21, + /* test_num */ 41, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 22, + /* test_num */ 42, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 23, + /* test_num */ 43, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -3827,78 +6516,75 @@ check_flush_cache__single_entry(H5C_t * 
cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 24, + /* test_num */ 44, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, + /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 25, + /* test_num */ 45, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 26, + /* test_num */ 46, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 27, + /* test_num */ 47, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ FALSE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -3906,18 +6592,17 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 28, + /* test_num */ 48, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ TRUE, + /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE @@ -3926,138 +6611,139 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + 
check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 29, + /* test_num */ 49, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 30, + /* test_num */ 50, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ TRUE, + H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 31, + /* test_num */ 51, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ TRUE, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 32, + /* test_num */ 52, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ FALSE, /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ TRUE, - /* expected_cleared */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__NO_FLAGS_SET, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 33, + /* test_num */ 53, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ FALSE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - 
/* expected_flushed */ TRUE, + /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 34, + /* test_num */ 54, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ FALSE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, + /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 35, + /* test_num */ 55, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -4065,18 +6751,19 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 36, + /* test_num */ 56, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -4084,56 +6771,59 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 37, + /* test_num */ 57, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 38, + /* test_num */ 58, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, + /* dirty_flag */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + 
H5C__FLUSH_CLEAR_ONLY_FLAG, /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE + /* expected_flushed */ FALSE, + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 39, + /* test_num */ 59, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, + /* dirty_flag */ TRUE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -4141,18 +6831,19 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 40, + /* test_num */ 60, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, + /* mark_dirty */ FALSE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -4160,59 +6851,59 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 41, + /* test_num */ 61, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, /* can't mark a clean entry */ + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 42, + /* test_num */ 62, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE + /* dirty_flag */ FALSE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, + /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ FALSE, + /* expected_flushed */ FALSE, /* can't makr a clean entry */ + /* expected_destroyed */ FALSE ); } if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ 
cache_ptr, - /* test_num */ 43, + /* test_num */ 63, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, + /* dirty_flag */ TRUE, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ FALSE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); @@ -4220,595 +6911,750 @@ check_flush_cache__single_entry(H5C_t * cache_ptr) if ( pass ) { - check_flush_cache__single_entry_test + check_flush_cache__pinned_single_entry_test ( /* cache_ptr */ cache_ptr, - /* test_num */ 44, + /* test_num */ 64, /* entry_type */ PICO_ENTRY_TYPE, /* entry_idx */ 0, - /* insert_flag */ TRUE, /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, + /* mark_dirty */ TRUE, + /* unprotect_unpin */ TRUE, + /* flags */ H5C__SET_FLUSH_MARKER_FLAG, /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, + H5C__FLUSH_CLEAR_ONLY_FLAG, + /* expected_cleared */ TRUE, /* expected_flushed */ FALSE, /* expected_destroyed */ FALSE ); } - if ( pass ) { + return; + +} /* check_flush_cache__single_entry() */ + + +/*------------------------------------------------------------------------- + * Function: check_flush_cache__single_entry_test() + * + * Purpose: Run a single entry flush cache test. + * + * Return: void + * + * Programmer: John Mainzer + * 1/12/05 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_flush_cache__single_entry_test(H5C_t * cache_ptr, + int test_num, + int entry_type, + int entry_idx, + hbool_t insert_flag, + hbool_t dirty_flag, + unsigned int flags, + unsigned int flush_flags, + hbool_t expected_loaded, + hbool_t expected_cleared, + hbool_t expected_flushed, + hbool_t expected_destroyed) +{ + /* const char * fcn_name = "check_flush_cache__single_entry_test"; */ + static char msg[128]; + herr_t result; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + if ( cache_ptr == NULL ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache_ptr NULL on entry to single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache not empty at beginning of single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( entry_type < 0 ) || ( entry_type >= NUMBER_OF_ENTRY_TYPES ) || + ( entry_idx < 0 ) || ( entry_idx > max_indices[entry_type] ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Bad parameters on entry to single entry test #%d.", + test_num); + failure_mssg = msg; + } + + if ( pass ) { + + base_addr = entries[entry_type]; + entry_ptr = &(base_addr[entry_idx]); + + if ( insert_flag ) { + + insert_entry(cache_ptr, entry_type, entry_idx, dirty_flag, flags); + + } else { + + protect_entry(cache_ptr, entry_type, entry_idx); + + unprotect_entry(cache_ptr, entry_type, entry_idx, + (int)dirty_flag, flags); + } + } + + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "flush with flags 0x%x failed in single entry test #%d.", + flush_flags, test_num); + 
failure_mssg = msg; + } + else if ( ( entry_ptr->loaded != expected_loaded ) || + ( entry_ptr->cleared != expected_cleared ) || + ( entry_ptr->flushed != expected_flushed ) || + ( entry_ptr->destroyed != expected_destroyed ) ) { + + HDfprintf(stdout, + "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", + (int)(entry_ptr->loaded), + (int)expected_loaded, + (int)(entry_ptr->cleared), + (int)expected_cleared, + (int)(entry_ptr->flushed), + (int)expected_flushed, + (int)(entry_ptr->destroyed), + (int)expected_destroyed); + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected entry status after flush in single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) + && + ( ( cache_ptr->index_len != 1 ) + || + ( cache_ptr->index_size != entry_sizes[entry_type] ) + ) + ) + || + ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) + && + ( ( cache_ptr->index_len != 0 ) + || + ( cache_ptr->index_size != 0 ) + ) + ) + ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after flush in single entry test #%d.", + test_num); + failure_mssg = msg; + } + } + + + /* clean up the cache to prep for the next test */ + if ( pass ) { + + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, + H5C__FLUSH_INVALIDATE_FLAG); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Flush failed on cleanup in single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after cleanup in single entry test #%d.", + test_num); + failure_mssg = msg; + + } else { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 45, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); + entry_ptr->loaded = FALSE; + entry_ptr->cleared = FALSE; + entry_ptr->flushed = FALSE; + entry_ptr->destroyed = FALSE; + } } - if ( pass ) { + return; - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 46, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); - } +} /* check_flush_cache__single_entry_test() */ - if ( pass ) { + +/*------------------------------------------------------------------------- + * Function: check_flush_cache__pinned_single_entry_test() + * + * Purpose: Run a pinned single entry flush cache test. 
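+ *
+ *              In outline, each pinned single entry test is expected
+ *              to proceed roughly as follows (a sketch only -- the
+ *              function body below is the authoritative version, and
+ *              all names are those used in this file):
+ *
+ *                  protect_entry(cache_ptr, entry_type, entry_idx);
+ *                  unprotect_entry(cache_ptr, entry_type, entry_idx,
+ *                                  (int)dirty_flag,
+ *                                  (flags | H5C__PIN_ENTRY_FLAG));
+ *                  if ( mark_dirty ) {
+ *                      mark_pinned_entry_dirty(cache_ptr, entry_type,
+ *                                              entry_idx, FALSE,
+ *                                              (size_t)0);
+ *                  }
+ *                  H5C_flush_cache(NULL, -1, -1, cache_ptr,
+ *                                  flush_flags);
+ *
+ *              The entry's cleared / flushed / destroyed flags are
+ *              then compared against the expected_* parameters, and
+ *              the entry is unpinned either via unprotect_entry()
+ *              with H5C__UNPIN_ENTRY_FLAG or via unpin_entry(), as
+ *              selected by unprotect_unpin.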
+ * + * Return: void + * + * Programmer: John Mainzer + * 3/28/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 47, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); - } +static void +check_flush_cache__pinned_single_entry_test(H5C_t * cache_ptr, + int test_num, + int entry_type, + int entry_idx, + hbool_t dirty_flag, + hbool_t mark_dirty, + hbool_t unprotect_unpin, + unsigned int flags, + unsigned int flush_flags, + hbool_t expected_cleared, + hbool_t expected_flushed, + hbool_t expected_destroyed) +{ + /* const char *fcn_name = "check_flush_cache__pinned_single_entry_test"; */ + static char msg[128]; + hbool_t expected_loaded = TRUE; + herr_t result; + test_entry_t * base_addr; + test_entry_t * entry_ptr; - if ( pass ) { + if ( cache_ptr == NULL ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 48, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__NO_FLAGS_SET, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache_ptr NULL on entry to pinned single entry test #%d.", + test_num); + failure_mssg = msg; } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { - if ( pass ) { + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "cache not empty at beginning of pinned single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( entry_type < 0 ) || ( entry_type >= NUMBER_OF_ENTRY_TYPES ) || + ( entry_idx < 0 ) || ( entry_idx > max_indices[entry_type] ) ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 49, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ FALSE - ); + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Bad parameters on entry to single entry test #%d.", + test_num); + failure_mssg = msg; } if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 50, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__NO_FLAGS_SET, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ FALSE - ); - } + base_addr = entries[entry_type]; + entry_ptr = &(base_addr[entry_idx]); - if ( pass ) { + protect_entry(cache_ptr, entry_type, entry_idx); - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 51, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* 
insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ FALSE - ); - } + unprotect_entry(cache_ptr, entry_type, entry_idx, + (int)dirty_flag, (flags | H5C__PIN_ENTRY_FLAG)); - if ( pass ) { + if ( mark_dirty ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 52, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ FALSE - ); + mark_pinned_entry_dirty(cache_ptr, entry_type, entry_idx, + FALSE, (size_t)0); + } } if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 53, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); - } + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); - if ( pass ) { + if ( result < 0 ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 54, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "flush with flags 0x%x failed in pinned single entry test #%d.", + flush_flags, test_num); + failure_mssg = msg; + } + else if ( ( entry_ptr->loaded != expected_loaded ) || + ( entry_ptr->cleared != expected_cleared ) || + ( entry_ptr->flushed != expected_flushed ) || + ( entry_ptr->destroyed != expected_destroyed ) ) { + + HDfprintf(stdout, + "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", + (int)(entry_ptr->loaded), + (int)expected_loaded, + (int)(entry_ptr->cleared), + (int)expected_cleared, + (int)(entry_ptr->flushed), + (int)expected_flushed, + (int)(entry_ptr->destroyed), + (int)expected_destroyed); + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected entry status after flush in single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) + && + ( ( cache_ptr->index_len != 1 ) + || + ( cache_ptr->index_size != entry_sizes[entry_type] ) + ) + ) + || + ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) + && + ( ( cache_ptr->index_len != 0 ) + || + ( cache_ptr->index_size != 0 ) + ) + ) + ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after flush in pinned single entry test #%d.", + test_num); + failure_mssg = msg; + } } + + /* clean up the cache to prep for the next test */ if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 55, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ 
H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ FALSE - ); - } + if ( unprotect_unpin ) { - if ( pass ) { + protect_entry(cache_ptr, entry_type, entry_idx); - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 56, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ FALSE - ); - } + unprotect_entry(cache_ptr, entry_type, entry_idx, + (int)dirty_flag, H5C__UNPIN_ENTRY_FLAG); - if ( pass ) { + } else { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 57, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); + unpin_entry(cache_ptr, entry_type, entry_idx); + + } } if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 58, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); - } + result = H5C_flush_cache(NULL, -1, -1, cache_ptr, + H5C__FLUSH_INVALIDATE_FLAG); - if ( pass ) { + if ( result < 0 ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 59, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ FALSE - ); - } + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Flush failed on cleanup in single entry test #%d.", + test_num); + failure_mssg = msg; + } + else if ( ( cache_ptr->index_len != 0 ) || + ( cache_ptr->index_size != 0 ) ) { - if ( pass ) { + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unexpected cache len/size after cleanup in pinned single entry test #%d.", + test_num); + failure_mssg = msg; - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 60, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ FALSE - ); + } else { + + entry_ptr->loaded = FALSE; + entry_ptr->cleared = FALSE; + entry_ptr->flushed = FALSE; + entry_ptr->destroyed = FALSE; + } } - if ( pass ) { + return; - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 61, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, 
- /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); - } +} /* check_flush_cache__single_entry_test() */ + + +/*------------------------------------------------------------------------- + * Function: check_rename_entry() + * + * Purpose: Verify that H5C_rename_entry behaves as expected. In + * particular, verify that it works correctly with pinned + * entries. + * + * Return: void + * + * Programmer: John Mainzer + * 4/26/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_rename_entry(void) +{ + const char * fcn_name = "check_rename_entry"; + int i; + H5C_t * cache_ptr = NULL; + struct rename_entry_test_spec test_specs[4] = + { + { + /* int entry_type = */ PICO_ENTRY_TYPE, + /* int entry_index = */ 10, + /* hbool_t is_dirty = */ FALSE, + /* hbool_t is_pinned = */ FALSE + }, + { + /* int entry_type = */ PICO_ENTRY_TYPE, + /* int entry_index = */ 20, + /* hbool_t is_dirty = */ TRUE, + /* hbool_t is_pinned = */ FALSE + }, + { + /* int entry_type = */ PICO_ENTRY_TYPE, + /* int entry_index = */ 30, + /* hbool_t is_dirty = */ FALSE, + /* hbool_t is_pinned = */ TRUE + }, + { + /* int entry_type = */ PICO_ENTRY_TYPE, + /* int entry_index = */ 40, + /* hbool_t is_dirty = */ TRUE, + /* hbool_t is_pinned = */ TRUE + } + }; + + TESTING("H5C_rename_entry() functionality"); + + pass = TRUE; + + /* allocate a cache, load entries into it, and then rename + * them. To the extent possible, verify that the desired + * actions took place. + * + * At present, we should do the following tests: + * + * 1) Rename a clean, unprotected, unpinned entry. + * + * 2) Rename a dirty, unprotected, unpinned entry. + * + * 3) Rename a clean, unprotected, pinned entry. + * + * 4) Rename a dirty, unprotected, pinned entry. + * + * In all cases, the entry should have moved to its + * new location, and have been marked dirty if it wasn't + * already. + * + * Unpinned entries should have been moved to the head + * of the LRU list. + * + * Pinned entries should remain untouched on the pinned entry + * list. 
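+ *
+ * In outline, each test spec is expected to be exercised roughly as
+ * follows (a sketch only -- check_rename_entry__run_test() below is
+ * the authoritative version, and all names are those used in this
+ * file):
+ *
+ *     protect_entry(cache_ptr, spec_ptr->entry_type,
+ *                   spec_ptr->entry_index);
+ *     unprotect_entry(cache_ptr, spec_ptr->entry_type,
+ *                     spec_ptr->entry_index,
+ *                     (int)(spec_ptr->is_dirty),
+ *                     (spec_ptr->is_pinned ? H5C__PIN_ENTRY_FLAG
+ *                                          : H5C__NO_FLAGS_SET));
+ *     rename_entry(cache_ptr, spec_ptr->entry_type,
+ *                  spec_ptr->entry_index, FALSE);
+ *
+ * after which the entry's pinned status and its position on the
+ * pinned entry list (pinned case) or at the head of the LRU
+ * (unpinned case) are verified, the entry is unpinned if necessary,
+ * and a final call to rename_entry() moves it back to its original
+ * address.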
+ */ if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 62, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_MARKED_ENTRIES_FLAG | - H5C__FLUSH_INVALIDATE_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ FALSE, - /* expected_flushed */ TRUE, - /* expected_destroyed */ TRUE - ); + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024 * 1024), + (size_t)(1 * 1024 * 1024)); + } + + i = 0; + while ( ( pass ) && ( i < 4 ) ) + { + check_rename_entry__run_test(cache_ptr, i, &(test_specs[i])); + i++; } if ( pass ) { - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 63, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ FALSE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); + takedown_cache(cache_ptr, FALSE, FALSE); } - if ( pass ) { + if ( pass ) { PASSED(); } else { H5_FAILED(); } - check_flush_cache__single_entry_test - ( - /* cache_ptr */ cache_ptr, - /* test_num */ 64, - /* entry_type */ PICO_ENTRY_TYPE, - /* entry_idx */ 0, - /* insert_flag */ TRUE, - /* dirty_flag */ TRUE, - /* flags */ H5C__SET_FLUSH_MARKER_FLAG, - /* flush_flags */ H5C__FLUSH_INVALIDATE_FLAG | - H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_MARKED_ENTRIES_FLAG, - /* expected_loaded */ FALSE, - /* expected_cleared */ TRUE, - /* expected_flushed */ FALSE, - /* expected_destroyed */ TRUE - ); + if ( ! pass ) { + + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); } -} /* check_flush_cache__single_entry() */ + return; + +} /* check_rename_entry() */ /*------------------------------------------------------------------------- - * Function: check_flush_cache__single_entry_test() + * Function: check_rename_entry__run_test() * - * Purpose: Run a single entry flush cache test. + * Purpose: Run a rename entry test. * - * Return: void + * Do nothing if pass is FALSE on entry. * - * Programmer: John Mainzer - * 1/12/05 + * Return: void + * + * Programmer: John Mainzer + * 4/27/06 * * Modifications: * + * None. 
+ * *------------------------------------------------------------------------- */ static void -check_flush_cache__single_entry_test(H5C_t * cache_ptr, - int test_num, - int entry_type, - int entry_idx, - hbool_t insert_flag, - hbool_t dirty_flag, - unsigned int flags, - unsigned int flush_flags, - hbool_t expected_loaded, - hbool_t expected_cleared, - hbool_t expected_flushed, - hbool_t expected_destroyed) +check_rename_entry__run_test(H5C_t * cache_ptr, + int test_num, + struct rename_entry_test_spec * spec_ptr) { - /* const char * fcn_name = "check_flush_cache__single_entry_test"; */ + /* const char * fcn_name = "check_rename_entry__run_test"; */ static char msg[128]; - herr_t result; + unsigned int flags = H5C__NO_FLAGS_SET; test_entry_t * base_addr; test_entry_t * entry_ptr; + H5C_cache_entry_t * test_ptr = NULL; if ( cache_ptr == NULL ) { pass = FALSE; HDsnprintf(msg, (size_t)128, - "cache_ptr NULL on entry to single entry test #%d.", + "cache_ptr NULL on entry to rename test #%d.", test_num); failure_mssg = msg; - } - else if ( ( cache_ptr->index_len != 0 ) || - ( cache_ptr->index_size != 0 ) ) { + + } else if ( spec_ptr == NULL ) { pass = FALSE; HDsnprintf(msg, (size_t)128, - "cache not empty at beginning of single entry test #%d.", + "spec_ptr NULL on entry to rename test #%d.", test_num); failure_mssg = msg; + } - else if ( ( entry_type < 0 ) || ( entry_type >= NUMBER_OF_ENTRY_TYPES ) || - ( entry_idx < 0 ) || ( entry_idx > max_indices[entry_type] ) ) { - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Bad parameters on entry to single entry test #%d.", - test_num); - failure_mssg = msg; + if ( pass ) { + + base_addr = entries[spec_ptr->entry_type]; + entry_ptr = &(base_addr[spec_ptr->entry_index]); + + if ( ( entry_ptr->self != entry_ptr ) || + ( ( entry_ptr->cache_ptr != cache_ptr ) && + ( entry_ptr->cache_ptr != NULL ) ) || + ( ! ( entry_ptr->at_main_addr ) ) || + ( entry_ptr->addr != entry_ptr->main_addr ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "bad entry_ptr in rename test #%d.", + test_num); + failure_mssg = msg; + + } else if ( spec_ptr->is_pinned ) { + + flags |= H5C__PIN_ENTRY_FLAG; + } } + protect_entry(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index); + + unprotect_entry(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index, + (int)(spec_ptr->is_dirty), flags); + + rename_entry(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index, FALSE); + if ( pass ) { - base_addr = entries[entry_type]; - entry_ptr = &(base_addr[entry_idx]); + /* verify that the rename took place, and that the cache's internal + * structures are as expected. Note that some sanity checking is + * done by rename_entry(), so we don't have to repeat it here. + */ - if ( insert_flag ) { + if ( spec_ptr->is_pinned ) { - insert_entry(cache_ptr, entry_type, entry_idx, dirty_flag, flags); + if ( ! 
( entry_ptr->header.is_pinned ) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Pinned entry not pinned after rename in test #%d.", + test_num); + failure_mssg = msg; + } + + if ( pass ) { + + test_ptr = cache_ptr->pel_head_ptr; + + while ( ( test_ptr != NULL ) && + ( test_ptr != (H5C_cache_entry_t *)entry_ptr ) ) + { + test_ptr = test_ptr->next; + } + + if ( test_ptr == NULL ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Pinned entry not in pel after rename in test #%d.", + test_num); + failure_mssg = msg; + } + } + + unpin_entry(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index); } else { - protect_entry(cache_ptr, entry_type, entry_idx); + if ( entry_ptr->header.is_pinned ) { - unprotect_entry(cache_ptr, entry_type, entry_idx, - (int)dirty_flag, flags); + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Unpinned entry pinned after rename in test #%d.", + test_num); + failure_mssg = msg; + } + + if ( ( entry_ptr->header.prev != NULL ) || + ( cache_ptr->LRU_head_ptr != (H5C_cache_entry_t *)entry_ptr ) ) + { + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "Entry not at head of LRU after rename in test #%d.", + test_num); + failure_mssg = msg; + } } } - if ( pass ) { + /* put the entry back where it started from */ + rename_entry(cache_ptr, spec_ptr->entry_type, spec_ptr->entry_index, TRUE); - result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags); + return; - if ( result < 0 ) { +} /* check_rename_entry__run_test() */ - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "flush with flags 0x%x failed in single entry test #%d.", - flush_flags, test_num); - failure_mssg = msg; - } - else if ( ( entry_ptr->loaded != expected_loaded ) || - ( entry_ptr->cleared != expected_cleared ) || - ( entry_ptr->flushed != expected_flushed ) || - ( entry_ptr->destroyed != expected_destroyed ) ) { + +/*------------------------------------------------------------------------- + * Function: check_pin_protected_entry() + * + * Purpose: Verify that H5C_pin_protected_entry behaves as expected. + * + * Return: void + * + * Programmer: John Mainzer + * 4/28/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ - HDfprintf(stdout, - "loaded = %d(%d), clrd = %d(%d), flshd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->loaded), - (int)expected_loaded, - (int)(entry_ptr->cleared), - (int)expected_cleared, - (int)(entry_ptr->flushed), - (int)expected_flushed, - (int)(entry_ptr->destroyed), - (int)expected_destroyed); +static void +check_pin_protected_entry(void) +{ + const char * fcn_name = "check_pin_protected_entry"; + static char msg[128]; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * base_addr; + test_entry_t * entry_ptr; - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Unexpected entry status after flush in single entry test #%d.", - test_num); - failure_mssg = msg; - } - else if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 ) - && - ( ( cache_ptr->index_len != 1 ) - || - ( cache_ptr->index_size != entry_sizes[entry_type] ) - ) - ) - || - ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ) - && - ( ( cache_ptr->index_len != 0 ) - || - ( cache_ptr->index_size != 0 ) - ) - ) - ) { + TESTING("H5C_pin_protected_entry() functionality"); - pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Unexpected cache len/size after flush in single entry test #%d.", - test_num); - failure_mssg = msg; - } + pass = TRUE; + + /* Create a cache, protect an entry, and then use H5C_pin_protected_entry() + * to pin it. 
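+ * A sketch of the expected call sequence (the code below is the
+ * authoritative version, and all names are those used in this file):
+ *
+ *     protect_entry(cache_ptr, 0, 0);
+ *     result = H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr);
+ *     -- result should be non-negative and
+ *        entry_ptr->header.is_pinned should be TRUE --
+ *     unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG);
+ *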
Verify that the entry is in fact pined. Unprotect the entry + * to unpin it, and then destroy the cache. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024 * 1024), + (size_t)(1 * 1024 * 1024)); } + protect_entry(cache_ptr, 0, 0); - /* clean up the cache to prep for the next test */ if ( pass ) { - result = H5C_flush_cache(NULL, -1, -1, cache_ptr, - H5C__FLUSH_INVALIDATE_FLAG); + base_addr = entries[0]; + entry_ptr = &(base_addr[0]); - if ( result < 0 ) { + result = H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr); + + if ( result < 0 ) { pass = FALSE; HDsnprintf(msg, (size_t)128, - "Flush failed on cleanup in single entry test #%d.", - test_num); + "H5C_pin_protected_entry() reports failure."); failure_mssg = msg; - } - else if ( ( cache_ptr->index_len != 0 ) || - ( cache_ptr->index_size != 0 ) ) { + + } else if ( ! ( entry_ptr->header.is_pinned ) ) { pass = FALSE; - HDsnprintf(msg, (size_t)128, - "Unexpected cache len/size after cleanup in single entry test #%d.", - test_num); + HDsnprintf(msg, (size_t)128, "entry not pinned when it should be."); failure_mssg = msg; - } else { + } else { - entry_ptr->loaded = FALSE; - entry_ptr->cleared = FALSE; - entry_ptr->flushed = FALSE; - entry_ptr->destroyed = FALSE; - } + entry_ptr->is_pinned = TRUE; + } } -} /* check_flush_cache__single_entry_test() */ + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG); + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_pin_protected_entry() */ /*------------------------------------------------------------------------- @@ -4851,36 +7697,113 @@ check_flush_protected_err(void) protect_entry(cache_ptr, 0, 0); - if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, H5C__NO_FLAGS_SET) - >= 0 ) { + if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, H5C__NO_FLAGS_SET) + >= 0 ) { + + pass = FALSE; + failure_mssg = "flush succeeded on cache with protected entry.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, TRUE, H5C__NO_FLAGS_SET); + + if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, H5C__NO_FLAGS_SET) + < 0 ) { + + pass = FALSE; + failure_mssg = "flush failed after unprotect.\n"; + + } else { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + } + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_flush_protected_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_destroy_pinned_err() + * + * Purpose: Verify that an attempt to destroy the cache when it contains + * a pinned entry that can't be unpined during the flush destroy + * will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/7/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_destroy_pinned_err(void) +{ + const char * fcn_name = "check_destroy_pinned_err()"; + H5C_t * cache_ptr = NULL; + + TESTING("destroy cache with permanently pinned entry error"); + + pass = TRUE; + + /* allocate a cache, pin an entry, and try to flush destroy. This + * should fail. Unpin the entry and flush destroy again -- should + * succeed. 
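+ *
+ * In outline (a sketch only -- the code below is the authoritative
+ * version):
+ *
+ *     protect_entry(cache_ptr, 0, 0);
+ *     unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG);
+ *     -- H5C_dest(NULL, -1, -1, cache_ptr) is expected to fail --
+ *     unpin_entry(cache_ptr, 0, 0);
+ *     -- H5C_dest(NULL, -1, -1, cache_ptr) is expected to succeed --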
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG); + + if ( H5C_dest(NULL, -1, -1, cache_ptr) >= 0 ) { pass = FALSE; - failure_mssg = "flush succeeded on cache with protected entry.\n"; + failure_mssg = "destroy succeeded on cache with pinned entry.\n"; } else { - unprotect_entry(cache_ptr, 0, 0, TRUE, H5C__NO_FLAGS_SET); + unpin_entry(cache_ptr, 0, 0); - if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, H5C__NO_FLAGS_SET) - < 0 ) { + if ( H5C_dest(NULL, -1, -1, cache_ptr) < 0 ) { pass = FALSE; - failure_mssg = "flush failed after unprotect.\n"; - - } else { + failure_mssg = "destroy failed after unpin.\n"; - takedown_cache(cache_ptr, FALSE, FALSE); } } } if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } -} /* check_flush_protected_err() */ + return; + +} /* check_destroy_pinned_err() */ /*------------------------------------------------------------------------- @@ -4943,9 +7866,13 @@ check_destroy_protected_err(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_destroy_protected_err() */ @@ -5018,9 +7945,13 @@ check_duplicate_insert_err(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_duplicate_insert_err() */ @@ -5081,23 +8012,321 @@ check_rename_err(void) result = H5C_rename_entry(cache_ptr, &(types[0]), entry_0_0_ptr->addr, entry_0_1_ptr->addr); - if ( result >= 0 ) { + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = "rename to addr of same type succeeded.\n"; + } + } + + if ( pass ) { + + result = H5C_rename_entry(cache_ptr, &(types[0]), + entry_0_0_ptr->addr, entry_1_0_ptr->addr); + + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = "rename to addr of different type succeeded.\n"; + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_rename_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_double_pin_err() + * + * Purpose: Verify that an attempt to pin an entry that is already + * pinned will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/24/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +check_double_pin_err(void) +{ + const char * fcn_name = "check_double_pin_err()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("pin a pinned entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, unprotect it with the pin flag, + * protect it again, and then try to unprotect it again with the pin + * flag. This should fail. Unpin the entry and destroy the cache + * -- should succeed. 
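+ *
+ * In outline (a sketch only -- the code below is the authoritative
+ * version, and all names are those used in this file):
+ *
+ *     protect_entry(cache_ptr, 0, 0);
+ *     unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG);
+ *     protect_entry(cache_ptr, 0, 0);
+ *     result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]),
+ *                            entry_ptr->addr, (void *)entry_ptr,
+ *                            H5C__PIN_ENTRY_FLAG, 0);
+ *     -- the H5C_unprotect() call is expected to fail, since the
+ *        entry is already pinned --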
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG); + + protect_entry(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, (void *)entry_ptr, + H5C__PIN_ENTRY_FLAG, 0); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to pin a pinned entry succeeded.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG); + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_double_pin_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_double_unpin_err() + * + * Purpose: Verify that an attempt to unpin an unpinned entry will + * generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/24/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +check_double_unpin_err(void) +{ + const char * fcn_name = "check_double_unpin_err()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("unpin an unpinned entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, unprotect it with the unpin flag. + * -- This should fail. + * + * Try again with H5C_unpin_entry -- this should also fail. + * + * Destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, (void *)entry_ptr, + H5C__UNPIN_ENTRY_FLAG, 0); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to unpin an unpinned entry succeeded 1.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + } + } + + if ( pass ) { + + result = H5C_unpin_entry(cache_ptr, (void *)entry_ptr); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to unpin an unpinned entry succeeded 2.\n"; + + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_double_unpin_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_pin_entry_errs() + * + * Purpose: Verify that invalid calls to H5C_pin_protected_entry() + * generate errors as expected. + * + * Return: void + * + * Programmer: John Mainzer + * 4/24/06 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static void +check_pin_entry_errs(void) +{ + const char * fcn_name = "check_pin_entry_errs()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("pin entry related errors"); + + pass = TRUE; + + /* Allocate a cache, protect an entry, unprotect it with no flags, + * and then call H5C_pin_protected_entry() to pin it -- This should fail. + * + * Protect the entry again, unprotect it with a pin flag, protect it + * again, and then call H5C_pin_protected_entry() to pin it -- This + * should fail also. + * + * Unprotect the entry with the unpin flag. + * + * Destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to pin an unprotected entry succeeded.\n"; + + } else { + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG); - pass = FALSE; - failure_mssg = "rename to addr of same type succeeded.\n"; - } + protect_entry(cache_ptr, 0, 0); + } } if ( pass ) { - result = H5C_rename_entry(cache_ptr, &(types[0]), - entry_0_0_ptr->addr, entry_1_0_ptr->addr); + result = H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr); - if ( result >= 0 ) { + if ( result > 0 ) { pass = FALSE; - failure_mssg = "rename to addr of different type succeeded.\n"; - } + failure_mssg = + "attempt to pin a pinned, protected entry succeeded.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG); + + } } if ( pass ) { @@ -5107,11 +8336,15 @@ check_rename_err(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } -} /* check_rename_err() */ + return; + +} /* check_pin_entry_errs() */ /*------------------------------------------------------------------------- @@ -5183,9 +8416,13 @@ check_double_protect_err(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_double_protect_err() */ @@ -5266,14 +8503,128 @@ check_double_unprotect_err(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_double_unprotect_err() */ /*------------------------------------------------------------------------- + * Function: check_mark_pinned_entry_dirty_errs() + * + * Purpose: Verify that: + * + * 1) a call to H5C_mark_pinned_entry_dirty with an upinned + * entry as the target will generate an error. + * + * 2) a call to H5C_mark_pinned_entry_dirty with a protected + * entry as the target will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/25/06 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static void +check_mark_pinned_entry_dirty_errs(void) +{ + const char * fcn_name = "check_mark_pinned_entry_dirty_errs()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("mark pinned entry dirty related errors"); + + pass = TRUE; + + /* allocate a cache, protect an entry, and then attempt to mark it dirty + * with the H5C_mark_pinned_entry_dirty() call -- This should fail. + * + * Then unprotect the entry without pinning it, and try to mark it dirty + * again -- this should fail too. + * + * Destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG); + + protect_entry(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_mark_pinned_entry_dirty(cache_ptr, (void *)entry_ptr, + FALSE, 0); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt dirty a pinned and protected entry succeeded.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG); + } + } + + if ( pass ) { + + result = H5C_mark_pinned_entry_dirty(cache_ptr, (void *)entry_ptr, + FALSE, 0); + + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt dirty a unpinned and unprotected entry succeeded.\n"; + + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_mark_pinned_entry_dirty_errs() */ + + +/*------------------------------------------------------------------------- * Function: check_auto_cache_resize() * * Purpose: Exercise the automatic cache resizing functionality. @@ -8471,9 +11822,13 @@ check_auto_cache_resize(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_auto_cache_resize() */ @@ -10983,9 +14338,13 @@ check_auto_cache_resize_disable(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_auto_cache_resize_disable() */ @@ -11662,9 +15021,13 @@ check_auto_cache_resize_epoch_markers(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_auto_cache_resize_epoch_markers() */ @@ -13634,9 +16997,13 @@ check_auto_cache_resize_input_errs(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_auto_cache_resize_input_errs() */ @@ -14207,14 +17574,205 @@ check_auto_cache_resize_aux_fcns(void) if ( pass ) { PASSED(); } else { H5_FAILED(); } - if ( ! pass ) + if ( ! 
pass ) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); + } + + return; } /* check_auto_cache_resize_aux_fcns() */ /*------------------------------------------------------------------------- + * Function: check_get_entry_status() + * + * Purpose: Verify that H5AC_get_entry_status() behaves as expected. + * + * Return: void + * + * Programmer: John Mainzer + * 4/28/06 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_get_entry_status(void) +{ + const char * fcn_name = "check_get_entry_status"; + static char msg[128]; + herr_t result; + unsigned int status; + H5C_t * cache_ptr = NULL; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + TESTING("H5AC_check_get_entry_status() functionality"); + + pass = TRUE; + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024 * 1024), + (size_t)(1 * 1024 * 1024)); + + base_addr = entries[0]; + entry_ptr = &(base_addr[0]); + } + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 1."); + failure_mssg = msg; + + } else if ( status != 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 1."); + failure_mssg = msg; + } + } + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 2."); + failure_mssg = msg; + + } else if ( status != H5AC_ES__IN_CACHE ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 2."); + failure_mssg = msg; + } + } + + protect_entry(cache_ptr, 0, 0); + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 3."); + failure_mssg = msg; + + } else if ( status != (H5AC_ES__IN_CACHE | H5AC_ES__IS_PROTECTED) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 3."); + failure_mssg = msg; + } + } + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG); + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 4."); + failure_mssg = msg; + + } else if ( status != (H5AC_ES__IN_CACHE | H5AC_ES__IS_PINNED) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 4."); + failure_mssg = msg; + } + } + + mark_pinned_entry_dirty(cache_ptr, 0, 0, FALSE, 0); + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 5."); + failure_mssg = msg; + + } else if ( status != (H5AC_ES__IN_CACHE | + H5AC_ES__IS_PINNED | + H5AC_ES__IS_DIRTY) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 5."); + failure_mssg = msg; + } + } + + unpin_entry(cache_ptr, 0, 0); + + if ( pass ) { + + result = H5AC_get_entry_status(cache_ptr, entry_ptr->addr, &status); + + if ( result < 0 ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, + "H5AC_get_entry_status() reports failure 6."); + 
failure_mssg = msg; + + } else if ( status != (H5AC_ES__IN_CACHE | H5AC_ES__IS_DIRTY) ) { + + pass = FALSE; + HDsnprintf(msg, (size_t)128, "Unexpected status 6."); + failure_mssg = msg; + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_get_entry_status() */ + + +/*------------------------------------------------------------------------- * Function: main * * Purpose: Run tests on the cache code contained in H5C.c @@ -14257,17 +17815,25 @@ main(void) #if 1 write_permitted_check(); check_flush_cache(); + check_rename_entry(); + check_pin_protected_entry(); check_flush_protected_err(); + check_destroy_pinned_err(); check_destroy_protected_err(); check_duplicate_insert_err(); check_rename_err(); + check_double_pin_err(); + check_double_unpin_err(); + check_pin_entry_errs(); check_double_protect_err(); check_double_unprotect_err(); + check_mark_pinned_entry_dirty_errs(); check_auto_cache_resize(); check_auto_cache_resize_disable(); check_auto_cache_resize_epoch_markers(); check_auto_cache_resize_input_errs(); check_auto_cache_resize_aux_fcns(); + check_get_entry_status(); #endif return(0); diff --git a/test/cache_common.c b/test/cache_common.c index c87fb7f..55276d0 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -505,6 +505,11 @@ monster_clear(H5F_t * f, void * thing, hbool_t dest) * * Modifications: * + * JRM -- 4/4/06 + * Added code to decrement the pinning_ref_count s of entries + * pinned by the target entry, and to unpin those entries + * if the reference count drops to zero. + * *------------------------------------------------------------------------- */ @@ -512,18 +517,23 @@ herr_t destroy(H5F_t UNUSED * f, void * thing) { + int i; test_entry_t * entry_ptr; test_entry_t * base_addr; + test_entry_t * pinned_entry_ptr; + test_entry_t * pinned_base_addr; HDassert( thing ); entry_ptr = (test_entry_t *)thing; base_addr = entries[entry_ptr->type]; - HDassert ( entry_ptr->index >= 0 ); - HDassert ( entry_ptr->index <= max_indices[entry_ptr->type] ); + HDassert( entry_ptr->index >= 0 ); + HDassert( entry_ptr->index <= max_indices[entry_ptr->type] ); HDassert( entry_ptr == &(base_addr[entry_ptr->index]) ); HDassert( entry_ptr == entry_ptr->self ); + HDassert( entry_ptr->cache_ptr != NULL ); + HDassert( entry_ptr->cache_ptr->magic == H5C__H5C_T_MAGIC ); HDassert( entry_ptr->header.addr == entry_ptr->addr ); HDassert( entry_ptr->header.size == entry_ptr->size ); HDassert( entry_ptr->size == entry_sizes[entry_ptr->type] ); @@ -531,7 +541,42 @@ destroy(H5F_t UNUSED * f, HDassert( !(entry_ptr->is_dirty) ); HDassert( !(entry_ptr->header.is_dirty) ); + if ( entry_ptr->num_pins > 0 ) { + + for ( i = 0; i < entry_ptr->num_pins; i++ ) + { + pinned_base_addr = entries[entry_ptr->pin_type[i]]; + pinned_entry_ptr = &(pinned_base_addr[entry_ptr->pin_idx[i]]); + + HDassert( 0 <= pinned_entry_ptr->type ); + HDassert( pinned_entry_ptr->type < NUMBER_OF_ENTRY_TYPES ); + HDassert( pinned_entry_ptr->type == entry_ptr->pin_type[i] ); + HDassert( pinned_entry_ptr->index >= 0 ); + HDassert( pinned_entry_ptr->index <= + max_indices[pinned_entry_ptr->type] ); + HDassert( pinned_entry_ptr->index == entry_ptr->pin_idx[i] ); + HDassert( pinned_entry_ptr == pinned_entry_ptr->self ); + HDassert( pinned_entry_ptr->header.is_pinned ); + HDassert( pinned_entry_ptr->is_pinned ); + HDassert( 
pinned_entry_ptr->pinning_ref_count > 0 ); + + pinned_entry_ptr->pinning_ref_count--; + + if ( pinned_entry_ptr->pinning_ref_count <= 0 ) { + + unpin_entry(pinned_entry_ptr->cache_ptr, + pinned_entry_ptr->type, + pinned_entry_ptr->index); + } + + entry_ptr->pin_type[i] = -1; + entry_ptr->pin_idx[i] = -1; + } + entry_ptr->num_pins = 0; + } + entry_ptr->destroyed = TRUE; + entry_ptr->cache_ptr = NULL; return(SUCCEED); @@ -966,6 +1011,88 @@ monster_size(H5F_t * f, void * thing, size_t * size_ptr) /**************************************************************************/ /*------------------------------------------------------------------------- + * Function: create_pinned_entry_dependency + * + * Purpose: Do noting if pass is FALSE on entry. + * + * Otherwise, set up a pinned entry dependency so we can + * test the pinned entry modifications to the flush routine. + * + * Given the types and indicies of the pinned and pinning + * entries, add the pinned entry to the list of pinned + * entries in the pinning entry, increment the + * pinning reference count of the pinned entry, and + * if that count was zero initially, pin the entry. + * + * Return: void + * + * Programmer: John Mainzer + * 6/10/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +void +create_pinned_entry_dependency(H5C_t * cache_ptr, + int pinning_type, + int pinning_idx, + int pinned_type, + int pinned_idx) +{ + test_entry_t * pinning_base_addr; + test_entry_t * pinning_entry_ptr; + test_entry_t * pinned_base_addr; + test_entry_t * pinned_entry_ptr; + + if ( pass ) { + + HDassert( ( 0 <= pinning_type ) && + ( pinning_type < NUMBER_OF_ENTRY_TYPES ) ); + HDassert( ( 0 <= pinning_idx ) && + ( pinning_idx <= max_indices[pinning_type] ) ); + HDassert( ( 0 <= pinned_type ) && + ( pinned_type < NUMBER_OF_ENTRY_TYPES ) ); + HDassert( ( 0 <= pinned_idx ) && + ( pinned_idx <= max_indices[pinned_type] ) ); + + pinning_base_addr = entries[pinning_type]; + pinning_entry_ptr = &(pinning_base_addr[pinning_idx]); + + pinned_base_addr = entries[pinned_type]; + pinned_entry_ptr = &(pinned_base_addr[pinned_idx]); + + HDassert( pinning_entry_ptr->index == pinning_idx ); + HDassert( pinning_entry_ptr->type == pinning_type ); + HDassert( pinning_entry_ptr == pinning_entry_ptr->self ); + HDassert( pinning_entry_ptr->num_pins < MAX_PINS ); + + HDassert( pinning_entry_ptr->index == pinning_idx ); + HDassert( pinning_entry_ptr->type == pinning_type ); + HDassert( pinning_entry_ptr == pinning_entry_ptr->self ); + HDassert( ! ( pinning_entry_ptr->is_protected ) ); + + pinning_entry_ptr->pin_type[pinning_entry_ptr->num_pins] = pinned_type; + pinning_entry_ptr->pin_idx[pinning_entry_ptr->num_pins] = pinned_idx; + (pinning_entry_ptr->num_pins)++; + + if ( pinned_entry_ptr->pinning_ref_count == 0 ) { + + protect_entry(cache_ptr, pinned_type, pinned_idx); + unprotect_entry(cache_ptr, pinned_type, pinned_idx, FALSE, + H5C__PIN_ENTRY_FLAG); + } + + (pinned_entry_ptr->pinning_ref_count)++; + } + + return; + +} /* create_pinned_entry_dependency() */ + + +/*------------------------------------------------------------------------- * Function: entry_in_cache * * Purpose: Given a pointer to a cache, an entry type, and an index, @@ -1032,6 +1159,10 @@ entry_in_cache(H5C_t * cache_ptr, * * Modifications: * + * JRM -- 3/31/06 + * Added initialization for new pinned entry test related + * fields. 
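+ *
+ * The new fields (is_pinned, pinning_ref_count, num_pins,
+ * pin_type[], and pin_idx[]) support the pinned entry dependency
+ * tests: create_pinned_entry_dependency() records in the pinning
+ * entry the type and index of each entry it pins, and increments
+ * the pinned entry's pinning_ref_count (pinning it on the first
+ * reference), while destroy() decrements that count and unpins the
+ * entry when it drops to zero. A sketch of the expected usage,
+ * with the names used in this file:
+ *
+ *     create_pinned_entry_dependency(cache_ptr, pinning_type,
+ *                                    pinning_idx, pinned_type,
+ *                                    pinned_idx);
+ *     -- when the pinning entry is later destroyed, the pinned
+ *        entry's pinning_ref_count is decremented, and the pinned
+ *        entry is unpinned via unpin_entry() if the count reaches
+ *        zero --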
+ * *------------------------------------------------------------------------- */ @@ -1041,6 +1172,7 @@ reset_entries(void) { int i; int j; + int k; int32_t max_index; haddr_t addr = 0; haddr_t alt_addr = PICO_ALT_BASE_ADDR; @@ -1074,6 +1206,7 @@ reset_entries(void) base_addr[j].header.aux_prev = NULL; base_addr[j].self = &(base_addr[j]); + base_addr[j].cache_ptr = NULL; base_addr[j].addr = addr; base_addr[j].at_main_addr = TRUE; base_addr[j].main_addr = addr; @@ -1086,6 +1219,15 @@ reset_entries(void) base_addr[j].is_dirty = FALSE; base_addr[j].is_protected = FALSE; + base_addr[j].is_pinned = FALSE; + base_addr[j].pinning_ref_count = 0; + base_addr[j].num_pins = 0; + for ( k = 0; k < MAX_PINS; k++ ) + { + base_addr[j].pin_type[k] = -1; + base_addr[j].pin_idx[k] = -1; + } + base_addr[j].loaded = FALSE; base_addr[j].cleared = FALSE; base_addr[j].flushed = FALSE; @@ -1391,6 +1533,10 @@ flush_cache(H5C_t * cache_ptr, * The interface no longer permits clean inserts. * Accordingly, the dirty parameter is no longer meaningfull. * + * JRM -- 4/5/06 + * Added code to initialize the new cache_ptr field of the + * test_entry_t structure. + * *------------------------------------------------------------------------- */ @@ -1439,7 +1585,8 @@ insert_entry(H5C_t * cache_ptr, HDfprintf(stdout, "result = %d\n", (int)result); HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n", (int)(entry_ptr->header.is_protected)); - HDfprintf(stdout, "entry_ptr->header.type != &(types[type]) = %d\n", + HDfprintf(stdout, + "entry_ptr->header.type != &(types[type]) = %d\n", (int)(entry_ptr->header.type != &(types[type]))); HDfprintf(stdout, "entry_ptr->size != entry_ptr->header.size = %d\n", @@ -1449,6 +1596,9 @@ insert_entry(H5C_t * cache_ptr, (int)(entry_ptr->addr != entry_ptr->header.addr)); #endif } + HDassert( entry_ptr->cache_ptr == NULL ); + + entry_ptr->cache_ptr = cache_ptr; HDassert( entry_ptr->header.is_dirty ); HDassert( ((entry_ptr->header).type)->id == type ); @@ -1460,6 +1610,82 @@ insert_entry(H5C_t * cache_ptr, /*------------------------------------------------------------------------- + * Function: mark_pinned_entry_dirty() + * + * Purpose: Mark the specified entry as dirty. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 3/28/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +void +mark_pinned_entry_dirty(H5C_t * cache_ptr, + int32_t type, + int32_t idx, + hbool_t size_changed, + size_t new_size) +{ + /* const char * fcn_name = "mark_pinned_entry_dirty()"; */ + herr_t result; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + if ( pass ) { + + HDassert( cache_ptr ); + HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) ); + HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) ); + + base_addr = entries[type]; + entry_ptr = &(base_addr[idx]); + + HDassert( entry_ptr->index == idx ); + HDassert( entry_ptr->type == type ); + HDassert( entry_ptr == entry_ptr->self ); + HDassert( entry_ptr->cache_ptr == cache_ptr ); + HDassert( ! (entry_ptr->header.is_protected) ); + HDassert( entry_ptr->header.is_pinned ); + HDassert( entry_ptr->is_pinned ); + + entry_ptr->is_dirty = TRUE; + + result = H5C_mark_pinned_entry_dirty(cache_ptr, + (void *)entry_ptr, + size_changed, + new_size); + + if ( ( result < 0 ) || + ( ! (entry_ptr->header.is_dirty) ) || + ( ! 
(entry_ptr->header.is_pinned) ) || + ( entry_ptr->header.type != &(types[type]) ) || + ( entry_ptr->size != entry_ptr->header.size ) || + ( entry_ptr->addr != entry_ptr->header.addr ) ) { + + pass = FALSE; + failure_mssg = "error in H5C_mark_pinned_entry_dirty()."; + + } + + HDassert( ((entry_ptr->header).type)->id == type ); + + } + + return; + +} /* mark_pinned_entry_dirty() */ + + +/*------------------------------------------------------------------------- * Function: rename_entry() * * Purpose: Rename the entry indicated by the type and index to its @@ -1503,9 +1729,11 @@ rename_entry(H5C_t * cache_ptr, HDassert( entry_ptr->index == idx ); HDassert( entry_ptr->type == type ); HDassert( entry_ptr == entry_ptr->self ); + HDassert( entry_ptr->cache_ptr == cache_ptr ); HDassert( !(entry_ptr->is_protected) ); HDassert( !(entry_ptr->header.is_protected) ); + if ( entry_ptr->at_main_addr && !main_addr ) { /* rename to alt addr */ @@ -1635,7 +1863,11 @@ protect_entry(H5C_t * cache_ptr, failure_mssg = "error in H5C_protect()."; } else { + + HDassert( ( entry_ptr->cache_ptr == NULL ) || + ( entry_ptr->cache_ptr == cache_ptr ) ); + entry_ptr->cache_ptr = cache_ptr; entry_ptr->is_protected = TRUE; } @@ -1649,6 +1881,76 @@ protect_entry(H5C_t * cache_ptr, /*------------------------------------------------------------------------- + * Function: unpin_entry() + * + * Purpose: Unpin the entry indicated by the type and index. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 3/28/06 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +void +unpin_entry(H5C_t * cache_ptr, + int32_t type, + int32_t idx) +{ + /* const char * fcn_name = "unpin_entry()"; */ + herr_t result; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + if ( pass ) { + + HDassert( cache_ptr ); + HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) ); + HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) ); + + base_addr = entries[type]; + entry_ptr = &(base_addr[idx]); + + HDassert( entry_ptr->index == idx ); + HDassert( entry_ptr->type == type ); + HDassert( entry_ptr == entry_ptr->self ); + HDassert( entry_ptr->cache_ptr == cache_ptr ); + HDassert( ! (entry_ptr->header.is_protected) ); + HDassert( entry_ptr->header.is_pinned ); + HDassert( entry_ptr->is_pinned ); + + result = H5C_unpin_entry(cache_ptr, (void *)entry_ptr); + + if ( ( result < 0 ) || + ( entry_ptr->header.is_pinned ) || + ( entry_ptr->header.type != &(types[type]) ) || + ( entry_ptr->size != entry_ptr->header.size ) || + ( entry_ptr->addr != entry_ptr->header.addr ) ) { + + pass = FALSE; + failure_mssg = "error in H5C_unpin()."; + + } + + entry_ptr->is_pinned = FALSE; + + HDassert( ((entry_ptr->header).type)->id == type ); + + } + + return; + +} /* unpin_entry() */ + + +/*------------------------------------------------------------------------- * Function: unprotect_entry() * * Purpose: Unprotect the entry indicated by the type and index. @@ -1674,6 +1976,9 @@ protect_entry(H5C_t * cache_ptr, * Update for new entry size parameter in H5C_unprotect(). * We don't use them here for now. * + * JRM -- 3/31/06 + * Update for pinned entries. 
+ * *------------------------------------------------------------------------- */ @@ -1686,6 +1991,8 @@ unprotect_entry(H5C_t * cache_ptr, { /* const char * fcn_name = "unprotect_entry()"; */ herr_t result; + hbool_t pin_flag_set; + hbool_t unpin_flag_set; test_entry_t * base_addr; test_entry_t * entry_ptr; @@ -1701,9 +2008,17 @@ unprotect_entry(H5C_t * cache_ptr, HDassert( entry_ptr->index == idx ); HDassert( entry_ptr->type == type ); HDassert( entry_ptr == entry_ptr->self ); + HDassert( entry_ptr->cache_ptr == cache_ptr ); HDassert( entry_ptr->header.is_protected ); HDassert( entry_ptr->is_protected ); + pin_flag_set = ((flags & H5C__PIN_ENTRY_FLAG) != 0 ); + unpin_flag_set = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0 ); + + HDassert ( ! ( pin_flag_set && unpin_flag_set ) ); + HDassert ( ( ! pin_flag_set ) || ( ! (entry_ptr->is_pinned) ) ); + HDassert ( ( ! unpin_flag_set ) || ( entry_ptr->is_pinned ) ); + if ( ( dirty == TRUE ) || ( dirty == FALSE ) ) { flags |= (dirty ? H5AC__DIRTIED_FLAG : H5AC__NO_FLAGS_SET); @@ -1727,6 +2042,18 @@ unprotect_entry(H5C_t * cache_ptr, else { entry_ptr->is_protected = FALSE; + + if ( pin_flag_set ) { + + HDassert ( entry_ptr->header.is_pinned ); + entry_ptr->is_pinned = TRUE; + + } else if ( unpin_flag_set ) { + + HDassert ( ! ( entry_ptr->header.is_pinned ) ); + entry_ptr->is_pinned = FALSE; + + } } HDassert( ((entry_ptr->header).type)->id == type ); diff --git a/test/cache_common.h b/test/cache_common.h index acacb30..ac58b1a 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -105,6 +105,9 @@ #define MONSTER_ALT_BASE_ADDR (haddr_t)(HUGE_ALT_BASE_ADDR + \ (HUGE_ENTRY_SIZE * NUM_HUGE_ENTRIES)) +#define MAX_PINS 8 /* Maximum number of entries that can be + * directly pinned by a single entry. + */ typedef struct test_entry_t { H5C_cache_entry_t header; /* entry data used by the cache @@ -113,6 +116,10 @@ typedef struct test_entry_t struct test_entry_t * self; /* pointer to this entry -- used for * sanity checking. */ + H5C_t * cache_ptr; /* pointer to the cache in which + * the entry resides, or NULL if the + * entry is not in cache. + */ haddr_t addr; /* where the cache thinks this entry * is located */ @@ -146,6 +153,25 @@ typedef struct test_entry_t hbool_t is_protected; /* entry should currently be on * the cache's protected list. */ + hbool_t is_pinned; /* entry is currently pinned in + * the cache. + */ + int pinning_ref_count; /* Number of entries that + * pin this entry in the cache. + * When this count drops to zero, + * this entry should be unpinned. + */ + int num_pins; /* Number of entries that this + * entry pins in the cache. This + * value must be in the range + * [0, MAX_PINS]. + */ + int pin_type[MAX_PINS]; /* array of the types of entries + * pinned by this entry. + */ + int pin_idx[MAX_PINS]; /* array of the indicies of + * entries pinned by this entry. + */ hbool_t loaded; /* entry has been loaded since the * last time it was reset. 
*/ @@ -247,6 +273,33 @@ struct flush_cache_test_spec hbool_t expected_destroyed; }; +struct pe_flush_cache_test_spec +{ + int entry_num; + int entry_type; + int entry_index; + hbool_t insert_flag; + hbool_t dirty_flag; + unsigned int flags; + int num_pins; + int pin_type[MAX_PINS]; + int pin_idx[MAX_PINS]; + hbool_t expected_loaded; + hbool_t expected_cleared; + hbool_t expected_flushed; + hbool_t expected_destroyed; +}; + +struct rename_entry_test_spec +{ + int entry_type; + int entry_index; + hbool_t is_dirty; + hbool_t is_pinned; +}; + + + /* global variable externs: */ @@ -376,6 +429,12 @@ void insert_entry(H5C_t * cache_ptr, hbool_t dirty, unsigned int flags); +void mark_pinned_entry_dirty(H5C_t * cache_ptr, + int32_t type, + int32_t idx, + hbool_t size_changed, + size_t new_size); + void rename_entry(H5C_t * cache_ptr, int32_t type, int32_t idx, @@ -389,6 +448,12 @@ hbool_t entry_in_cache(H5C_t * cache_ptr, int32_t type, int32_t idx); +void create_pinned_entry_dependency(H5C_t * cache_ptr, + int pinning_type, + int pinning_idx, + int pinned_type, + int pinned_idx); + void reset_entries(void); H5C_t * setup_cache(size_t max_cache_size, size_t min_clean_size); @@ -488,6 +553,10 @@ void flush_cache(H5C_t * cache_ptr, hbool_t dump_stats, hbool_t dump_detailed_stats); +void unpin_entry(H5C_t * cache_ptr, + int32_t type, + int32_t idx); + void unprotect_entry(H5C_t * cache_ptr, int32_t type, int32_t idx, diff --git a/testpar/t_cache.c b/testpar/t_cache.c index 95b3e4b..f80f4b3 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -26,6 +26,10 @@ #include "H5Cpkg.h" +#define H5AC_PACKAGE /*suppress error about including H5Cpkg */ + +#include "H5ACpkg.h" + #define H5F_PACKAGE /*suppress error about including H5Fpkg */ #include "H5Fpkg.h" @@ -57,6 +61,21 @@ int file_mpi_rank = -1; MPI_Comm file_mpi_comm = MPI_COMM_NULL; +/* the following globals are used to maintain rudementary statistics + * to check the validity of the statistics maintained by H5C.c + */ + +long datum_clears = 0; +long datum_pinned_clears = 0; +long datum_destroys = 0; +long datum_flushes = 0; +long datum_pinned_flushes = 0; +long datum_loads = 0; +long global_pins = 0; +long global_dirty_pins = 0; +long local_pins = 0; + + /***************************************************************************** * struct datum * @@ -91,6 +110,17 @@ MPI_Comm file_mpi_comm = MPI_COMM_NULL; * locked: Boolean flag that is set to true iff the entry is in * the cache and locked. * + * global_pinned: Boolean flag that is set to true iff the entry has + * been pinned collectively in all caches. Since writes must + * be collective across all processes, only entries pinned + * in this fashion may be marked dirty. + * + * local_pinned: Boolean flag that is set to true iff the entry + * has been pinned in the local cache, but probably not all + * caches. Such pins will typically not be consistant across + * processes, and thus cannot be marked as dirty unless they + * happen to overlap some collective operation. + * * index: Index of this instance of datum in the data_index[] array * discussed below. * @@ -105,6 +135,8 @@ struct datum hbool_t dirty; hbool_t valid; hbool_t locked; + hbool_t global_pinned; + hbool_t local_pinned; int index; }; @@ -124,15 +156,44 @@ struct datum *****************************************************************************/ #ifndef H5_HAVE_MPE + +#if 1 /* JRM */ #define NUM_DATA_ENTRIES 100000 -#else - /* Use a smaller test size to avoid creating huge MPE logfiles. 
*/ +#else /* JRM */ +#define NUM_DATA_ENTRIES 10000 +#endif /* JRM */ + +#else /* H5_HAVE_MPE */ + +/* Use a smaller test size to avoid creating huge MPE logfiles. */ #define NUM_DATA_ENTRIES 1000 -#endif + +#endif /* H5_HAVE_MPE */ struct datum data[NUM_DATA_ENTRIES]; +/* Many tests use the size of data array as the size of test loops. + * On some machines, this results in unacceptably long test runs. + * + * To deal with this issue, I have introduced the virt_num_data_entries + * global, which can be set to a lower value to throtle the length of + * tests. + * + * Note that this value must always be divisible by 40, and must be an + * even divisor of NUM_DATA_ENTRIES. So far, all tests have been with + * powers of 10 that meet these criteria. + * + * Further, this value must be consistant across all processes. + */ + +#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES +#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10) +#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100) + +int virt_num_data_entries = NUM_DATA_ENTRIES; + + /***************************************************************************** * data_index array * @@ -202,6 +263,11 @@ MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */ /************************** function declarations ****************************/ /*****************************************************************************/ +/* stats functions */ + +void print_stats(void); +void reset_stats(void); + /* MPI setup functions */ hbool_t set_up_file_communicator(void); @@ -213,9 +279,14 @@ int addr_to_datum_index(haddr_t base_addr); void init_data(void); -/* mssg xfer related functions */ +/* test coodination related functions */ +int do_express_test(void); int get_max_nerrors(void); + + +/* mssg xfer related functions */ + hbool_t recv_mssg(struct mssg_t *mssg_ptr); hbool_t send_mssg(struct mssg_t *mssg_ptr); hbool_t setup_derived_types(void); @@ -260,12 +331,25 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] = void insert_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx, unsigned int flags); +void local_pin_and_unpin_random_entries(H5C_t * cache_ptr, H5F_t * file_ptr, + int min_idx, int max_idx, + int min_count, int max_count); +void local_pin_random_entry(H5C_t * cache_ptr, H5F_t * file_ptr, + int min_idx, int max_idx); +void local_unpin_all_entries(H5C_t * cache_ptr, H5F_t * file_ptr, + hbool_t via_unprotect); +int local_unpin_next_pinned_entry(H5C_t * cache_ptr, H5F_t * file_ptr, + int start_idx, hbool_t via_unprotect); void lock_and_unlock_random_entries(H5C_t * cache_ptr, H5F_t * file_ptr, int min_idx, int max_idx, int min_count, int max_count); void lock_and_unlock_random_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int min_idx, int max_idx); void lock_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx); +void mark_pinned_entry_dirty(H5C_t * cache_ptr, int32_t idx, + hbool_t size_changed, size_t new_size); +void pin_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx, + hbool_t global, hbool_t dirty); void rename_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t old_idx, int32_t new_idx); hbool_t setup_cache_for_test(hid_t * fid_ptr, H5F_t ** file_ptr_ptr, @@ -274,6 +358,8 @@ void setup_rand(void); hbool_t take_down_cache(hid_t fid); void unlock_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t type, unsigned int flags); +void unpin_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx, + hbool_t global, hbool_t dirty, hbool_t via_unprotect); /* test functions */ @@ -286,6 +372,83 @@ hbool_t 
smoke_check_4(void); /*****************************************************************************/ +/****************************** stats functions ******************************/ +/*****************************************************************************/ + +/***************************************************************************** + * + * Function: print_stats() + * + * Purpose: Print the rudementary stats maintained by t_cache. + * + * This is a debugging function, which will not normally + * be run as part of t_cache. + * + * Return: void + * + * Programmer: JRM -- 4/17/06 + * + * Modifications: + * + * None. + * + *****************************************************************************/ + +void +print_stats(void) +{ + HDfprintf(stdout, + "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n", + world_mpi_rank, datum_clears, datum_pinned_clears, + datum_destroys ); + HDfprintf(stdout, + "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n", + world_mpi_rank, datum_flushes, datum_pinned_flushes, + datum_loads ); + HDfprintf(stdout, + "%d: pins: global / global dirty / local = %ld / %ld / %ld\n", + world_mpi_rank, global_pins, global_dirty_pins, local_pins); + HDfflush(stdout); + + return; + +} /* print_stats() */ + +/***************************************************************************** + * + * Function: reset_stats() + * + * Purpose: Reset the rudementary stats maintained by t_cache. + * + * Return: void + * + * Programmer: JRM -- 4/17/06 + * + * Modifications: + * + * None. + * + *****************************************************************************/ + +void +reset_stats(void) +{ + datum_clears = 0; + datum_pinned_clears = 0; + datum_destroys = 0; + datum_flushes = 0; + datum_pinned_flushes = 0; + datum_loads = 0; + global_pins = 0; + global_dirty_pins = 0; + local_pins = 0; + + return; + +} /* reset_stats() */ + + +/*****************************************************************************/ /**************************** MPI setup functions ****************************/ /*****************************************************************************/ @@ -433,14 +596,6 @@ set_up_file_communicator(void) } } -#if 0 /* Useful debuggging code -- lets keep it around for a while */ - if ( success ) { - - fprintf(stdout, "%d:%s: file mpi size = %d, file mpi rank = %d.\n", - world_mpi_rank, fcn_name, file_mpi_size, file_mpi_rank); - } -#endif /* JRM */ - return(success); } /* set_up_file_communicator() */ @@ -544,18 +699,21 @@ init_data(void) /* this must hold so renames don't change entry size. 
*/ HDassert( (NUM_DATA_ENTRIES / 2) % 20 == 0 ); + HDassert( (virt_num_data_entries / 2) % 20 == 0 ); for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) { - data[i].base_addr = addr; - data[i].len = (size_t)(addr_offsets[j]); - data[i].ver = 0; - data[i].dirty = FALSE; - data[i].valid = FALSE; - data[i].locked = FALSE; - data[i].index = i; - - data_index[i] = i; + data[i].base_addr = addr; + data[i].len = (size_t)(addr_offsets[j]); + data[i].ver = 0; + data[i].dirty = FALSE; + data[i].valid = FALSE; + data[i].locked = FALSE; + data[i].global_pinned = FALSE; + data[i].local_pinned = FALSE; + data[i].index = i; + + data_index[i] = i; addr += addr_offsets[j]; HDassert( addr > data[i].base_addr ); @@ -569,11 +727,68 @@ init_data(void) /*****************************************************************************/ -/************************ mssg xfer related functions ************************/ +/******************** test coodination related functions *********************/ /*****************************************************************************/ /***************************************************************************** * + * Function: do_express_test() + * + * Purpose: Do an MPI_Allreduce to obtain the maximum value returned + * by GetTestExpress() across all processes. Return this + * value. + * + * Envirmoment variables can be different across different + * processes. This function ensures that all processes agree + * on whether to do an express test. + * + * Return: Success: Maximum of the values returned by + * GetTestExpress() across all processes. + * + * Failure: -1 + * + * Programmer: JRM -- 4/25/06 + * + * Modifications: + * + * None. + * + *****************************************************************************/ + +int +do_express_test(void) +{ + const char * fcn_name = "do_express_test()"; + int express_test; + int max_express_test; + int result; + + express_test = GetTestExpress(); + + result = MPI_Allreduce((void *)&express_test, + (void *)&max_express_test, + 1, + MPI_INT, + MPI_MAX, + world_mpi_comm); + + if ( result != MPI_SUCCESS ) { + + nerrors++; + max_express_test = -1; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", + world_mpi_rank, fcn_name ); + } + } + + return(max_express_test); + +} /* do_express_test() */ + + +/***************************************************************************** + * * Function: get_max_nerrors() * * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors @@ -621,6 +836,10 @@ get_max_nerrors(void) } /* get_max_nerrors() */ +/*****************************************************************************/ +/************************ mssg xfer related functions ************************/ +/*****************************************************************************/ + /***************************************************************************** * * Function: recv_mssg() @@ -648,7 +867,6 @@ recv_mssg(struct mssg_t *mssg_ptr) const char * fcn_name = "recv_mssg()"; hbool_t success = TRUE; int result; - static long mssg_num = 0; MPI_Status status; if ( mssg_ptr == NULL ) { @@ -794,9 +1012,9 @@ setup_derived_types(void) hbool_t success = TRUE; int i; int result; - MPI_Datatype types[8] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, - HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT, - MPI_UNSIGNED}; + MPI_Datatype mpi_types[8] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, + HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT, + MPI_UNSIGNED}; int block_len[8] = {1, 1, 1, 1, 1, 1, 1, 1}; MPI_Aint displs[8]; struct mssg_t sample; /* used to compute 
displacements */ @@ -829,7 +1047,7 @@ setup_derived_types(void) if ( success ) { - result = MPI_Type_struct(8, block_len, displs, types, &mpi_mssg_t); + result = MPI_Type_struct(8, block_len, displs, mpi_types, &mpi_mssg_t); if ( result != MPI_SUCCESS ) { @@ -886,7 +1104,6 @@ takedown_derived_types(void) { const char * fcn_name = "takedown_derived_types()"; hbool_t success = TRUE; - int i; int result; result = MPI_Type_free(&mpi_mssg_t); @@ -1225,18 +1442,19 @@ clear_datum(H5F_t * f, void * thing, hbool_t dest) { - int index; + int idx; struct datum * entry_ptr; HDassert( thing ); entry_ptr = (struct datum *)thing; - index = addr_to_datum_index(entry_ptr->base_addr); + idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( index >= 0 ); - HDassert( index < NUM_DATA_ENTRIES ); - HDassert( &(data[index]) == entry_ptr ); + HDassert( idx >= 0 ); + HDassert( idx < NUM_DATA_ENTRIES ); + HDassert( idx < virt_num_data_entries ); + HDassert( &(data[idx]) == entry_ptr ); HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); HDassert( entry_ptr->header.size == entry_ptr->len ); @@ -1250,6 +1468,14 @@ clear_datum(H5F_t * f, } + datum_clears++; + + if ( entry_ptr->header.is_pinned ) { + + datum_pinned_clears++; + HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned ); + } + return(SUCCEED); } /* clear_datum() */ @@ -1275,24 +1501,30 @@ herr_t destroy_datum(H5F_t UNUSED * f, void * thing) { - int index; + int idx; struct datum * entry_ptr; HDassert( thing ); entry_ptr = (struct datum *)thing; - index = addr_to_datum_index(entry_ptr->base_addr); + idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( index >= 0 ); - HDassert( index < NUM_DATA_ENTRIES ); - HDassert( &(data[index]) == entry_ptr ); + HDassert( idx >= 0 ); + HDassert( idx < NUM_DATA_ENTRIES ); + HDassert( idx < virt_num_data_entries ); + HDassert( &(data[idx]) == entry_ptr ); HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); HDassert( entry_ptr->header.size == entry_ptr->len ); HDassert( !(entry_ptr->dirty) ); HDassert( !(entry_ptr->header.is_dirty) ); + HDassert( !(entry_ptr->global_pinned) ); + HDassert( !(entry_ptr->local_pinned) ); + HDassert( !(entry_ptr->header.is_pinned) ); + + datum_destroys++; return(SUCCEED); @@ -1318,12 +1550,12 @@ herr_t flush_datum(H5F_t *f, hid_t UNUSED dxpl_id, hbool_t dest, - haddr_t addr, + haddr_t UNUSED addr, void *thing) { const char * fcn_name = "flush_datum()"; herr_t ret_value = SUCCEED; - int index; + int idx; struct datum * entry_ptr; struct mssg_t mssg; @@ -1331,11 +1563,12 @@ flush_datum(H5F_t *f, entry_ptr = (struct datum *)thing; - index = addr_to_datum_index(entry_ptr->base_addr); + idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( index >= 0 ); - HDassert( index < NUM_DATA_ENTRIES ); - HDassert( &(data[index]) == entry_ptr ); + HDassert( idx >= 0 ); + HDassert( idx < NUM_DATA_ENTRIES ); + HDassert( idx < virt_num_data_entries ); + HDassert( &(data[idx]) == entry_ptr ); HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); HDassert( entry_ptr->header.size == entry_ptr->len ); @@ -1389,6 +1622,14 @@ flush_datum(H5F_t *f, } } + datum_flushes++; + + if ( entry_ptr->header.is_pinned ) { + + datum_pinned_flushes++; + HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned ); + } + return(ret_value); } /* flush_datum() */ @@ -1418,18 +1659,21 @@ load_datum(H5F_t UNUSED *f, { const char * fcn_name = "load_datum()"; hbool_t success = TRUE; - int index; + int idx; struct datum * entry_ptr = NULL; struct mssg_t mssg; - index = 
addr_to_datum_index(addr); + idx = addr_to_datum_index(addr); - HDassert( index >= 0 ); - HDassert( index < NUM_DATA_ENTRIES ); + HDassert( idx >= 0 ); + HDassert( idx < NUM_DATA_ENTRIES ); + HDassert( idx < virt_num_data_entries ); - entry_ptr = &(data[index]); + entry_ptr = &(data[idx]); HDassert( addr == entry_ptr->base_addr ); + HDassert( ! entry_ptr->global_pinned ); + HDassert( ! entry_ptr->local_pinned ); /* compose the read message */ mssg.req = READ_REQ_CODE; @@ -1494,6 +1738,8 @@ load_datum(H5F_t UNUSED *f, } + datum_loads++; + return(entry_ptr); } /* load_datum() */ @@ -1519,7 +1765,7 @@ size_datum(H5F_t UNUSED * f, void * thing, size_t * size_ptr) { - int index; + int idx; struct datum * entry_ptr; HDassert( thing ); @@ -1527,11 +1773,12 @@ size_datum(H5F_t UNUSED * f, entry_ptr = (struct datum *)thing; - index = addr_to_datum_index(entry_ptr->base_addr); + idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( index >= 0 ); - HDassert( index < NUM_DATA_ENTRIES ); - HDassert( &(data[index]) == entry_ptr ); + HDassert( idx >= 0 ); + HDassert( idx < NUM_DATA_ENTRIES ); + HDassert( idx < virt_num_data_entries ); + HDassert( &(data[idx]) == entry_ptr ); HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); @@ -1578,6 +1825,7 @@ insert_entry(H5C_t * cache_ptr, HDassert( cache_ptr ); HDassert( file_ptr ); HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); entry_ptr = &(data[idx]); @@ -1605,11 +1853,26 @@ insert_entry(H5C_t * cache_ptr, if ( ! (entry_ptr->header.is_dirty) ) { - nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", - world_mpi_rank, fcn_name, idx, - (int)(data[idx].header.is_dirty)); + /* it is possible that we just exceeded the dirty bytes + * threshold, triggering a write of the newly inserted + * entry. Test for this, and only flag an error if this + * is not the case. + */ + + struct H5AC_aux_t * aux_ptr; + + aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr)); + + if ( ! ( ( aux_ptr != NULL ) && + ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) && + ( aux_ptr->dirty_bytes == 0 ) ) ) { + + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", + world_mpi_rank, fcn_name, idx, + (int)(data[idx].header.is_dirty)); + } } } @@ -1623,6 +1886,242 @@ insert_entry(H5C_t * cache_ptr, /***************************************************************************** + * Function: local_pin_and_unpin_random_entries() + * + * Purpose: Pin a random number of randomly selected entries in cache, and + * then unpin a random number of entries. + * + * Do nothing if nerrors is non-zero on entry. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 4/12/06 + * + * Modifications: + * + *****************************************************************************/ + +void +local_pin_and_unpin_random_entries(H5C_t * cache_ptr, + H5F_t * file_ptr, + int min_idx, + int max_idx, + int min_count, + int max_count) +{ + const char * fcn_name = "local_pin_and_unpin_random_entries()"; + + if ( nerrors == 0 ) { + + hbool_t via_unprotect; + int count; + int i; + int idx; + + HDassert( cache_ptr ); + HDassert( file_ptr ); + HDassert( 0 <= min_idx ); + HDassert( min_idx < max_idx ); + HDassert( max_idx < NUM_DATA_ENTRIES ); + HDassert( max_idx < virt_num_data_entries ); + HDassert( 0 <= min_count ); + HDassert( min_count < max_count ); + + count = (HDrand() % (max_count - min_count)) + min_count; + + HDassert( min_count <= count ); + HDassert( count <= max_count ); + + for ( i = 0; i < count; i++ ) + { + local_pin_random_entry(cache_ptr, file_ptr, min_idx, max_idx); + } + + count = (HDrand() % (max_count - min_count)) + min_count; + + HDassert( min_count <= count ); + HDassert( count <= max_count ); + + i = 0; + idx = 0; + + while ( ( i < count ) && ( idx >= 0 ) ) + { + via_unprotect = ( (((unsigned)i) & 0x0001) == 0 ); + idx = local_unpin_next_pinned_entry(cache_ptr, file_ptr, + idx, via_unprotect); + i++; + } + } + + return; + +} /* local_pin_and_unpin_random_entries() */ + + +/***************************************************************************** + * Function: local_pin_random_entry() + * + * Purpose: Pin a randomly selected entry in cache, and mark the entry + * as being locally pinned. Since this entry will not in + * general be pinned in any other cache, we can't mark it + * dirty. + * + * Do nothing if nerrors is non-zero on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 4/12/06 + * + * Modifications: + * + *****************************************************************************/ + +void +local_pin_random_entry(H5C_t * cache_ptr, + H5F_t * file_ptr, + int min_idx, + int max_idx) +{ + const char * fcn_name = "local_pin_random_entry()"; + int idx; + + if ( nerrors == 0 ) { + + HDassert( cache_ptr ); + HDassert( file_ptr ); + HDassert( 0 <= min_idx ); + HDassert( min_idx < max_idx ); + HDassert( max_idx < NUM_DATA_ENTRIES ); + HDassert( max_idx < virt_num_data_entries ); + + do + { + idx = (HDrand() % (max_idx - min_idx)) + min_idx; + HDassert( min_idx <= idx ); + HDassert( idx <= max_idx ); + } + while ( data[idx].global_pinned || data[idx].local_pinned ); + + pin_entry(cache_ptr, file_ptr, idx, FALSE, FALSE); + } + + return; + +} /* local_pin_random_entry() */ + + +/***************************************************************************** + * Function: local_unpin_all_entries() + * + * Purpose: Unpin all local pinned entries. + * + * Do nothing if nerrors is non-zero on entry. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 4/12/06 + * + * Modifications: + * + *****************************************************************************/ + +void +local_unpin_all_entries(H5C_t * cache_ptr, + H5F_t * file_ptr, + hbool_t via_unprotect) +{ + const char * fcn_name = "local_unpin_all_entries()"; + + if ( nerrors == 0 ) { + + int idx; + + HDassert( cache_ptr ); + HDassert( file_ptr ); + + idx = 0; + + while ( idx >= 0 ) + { + idx = local_unpin_next_pinned_entry(cache_ptr, file_ptr, + idx, via_unprotect); + } + } + + return; + +} /* local_unpin_all_entries() */ + + +/***************************************************************************** + * Function: local_unpin_next_pinned_entry() + * + * Purpose: Find the next locally pinned entry after the specified + * starting point, and unpin it. + * + * Do nothing if nerrors is non-zero on entry. + * + * Return: Index of the unpinned entry if there is one, or -1 if + * nerrors is non-zero on entry, or if there is no locally + * pinned entry. + * + * Programmer: John Mainzer + * 4/12/06 + * + * Modifications: + * + *****************************************************************************/ + +int +local_unpin_next_pinned_entry(H5C_t * cache_ptr, + H5F_t * file_ptr, + int start_idx, + hbool_t via_unprotect) +{ + const char * fcn_name = "local_unpin_next_pinned_entry()"; + int i = 0; + int idx = -1; + + if ( nerrors == 0 ) { + + HDassert( cache_ptr ); + HDassert( file_ptr ); + HDassert( 0 <= start_idx ); + HDassert( start_idx < NUM_DATA_ENTRIES ); + HDassert( start_idx < virt_num_data_entries ); + + idx = start_idx; + + while ( ( i < virt_num_data_entries ) && + ( ! ( data[idx].local_pinned ) ) ) + { + i++; + idx++; + if ( idx >= virt_num_data_entries ) { + idx = 0; + } + } + + if ( data[idx].local_pinned ) { + + unpin_entry(cache_ptr, file_ptr, idx, FALSE, FALSE, via_unprotect); + + } else { + + idx = -1; + } + } + + return(idx); + +} /* local_unpin_next_pinned_entry() */ + + +/***************************************************************************** * Function: lock_and_unlock_random_entries() * * Purpose: Obtain a random number in the closed interval [min_count, @@ -1699,7 +2198,7 @@ lock_and_unlock_random_entry(H5C_t * cache_ptr, int max_idx) { const char * fcn_name = "lock_and_unlock_random_entry()"; - int index; + int idx; if ( nerrors == 0 ) { @@ -1708,14 +2207,15 @@ lock_and_unlock_random_entry(H5C_t * cache_ptr, HDassert( 0 <= min_idx ); HDassert( min_idx < max_idx ); HDassert( max_idx < NUM_DATA_ENTRIES ); + HDassert( max_idx < virt_num_data_entries ); - index = (HDrand() % (max_idx - min_idx)) + min_idx; + idx = (HDrand() % (max_idx - min_idx)) + min_idx; - HDassert( min_idx <= index ); - HDassert( index <= max_idx ); + HDassert( min_idx <= idx ); + HDassert( idx <= max_idx ); - lock_entry(cache_ptr, file_ptr, index); - unlock_entry(cache_ptr, file_ptr, index, H5AC__NO_FLAGS_SET); + lock_entry(cache_ptr, file_ptr, idx); + unlock_entry(cache_ptr, file_ptr, idx, H5AC__NO_FLAGS_SET); } return; @@ -1752,6 +2252,7 @@ lock_entry(H5C_t * cache_ptr, HDassert( cache_ptr ); HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); entry_ptr = &(data[idx]); @@ -1780,6 +2281,142 @@ lock_entry(H5C_t * cache_ptr, /***************************************************************************** + * Function: mark_pinned_entry_dirty() + * + * Purpose: Mark dirty the entry indicated by the index, + * + * Do nothing if nerrors is non-zero on entry. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 4/14/06 + * + * Modifications: + * + *****************************************************************************/ + +void +mark_pinned_entry_dirty(H5C_t * cache_ptr, + int32_t idx, + hbool_t size_changed, + size_t new_size) +{ + const char * fcn_name = "mark_pinned_entry_dirty()"; + herr_t result; + struct datum * entry_ptr; + + if ( nerrors == 0 ) { + + HDassert( cache_ptr ); + HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); + + entry_ptr = &(data[idx]); + + HDassert ( entry_ptr->global_pinned ); + HDassert ( ! (entry_ptr->local_pinned) ); + + (entry_ptr->ver)++; + entry_ptr->dirty = TRUE; + + result = H5AC_mark_pinned_entry_dirty(cache_ptr, + (void *)entry_ptr, + size_changed, + new_size); + + if ( result < 0 ) { + + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", + world_mpi_rank, fcn_name); + } + } + else + { + global_dirty_pins++; + } + } + + return; + +} /* mark_pinned_entry_dirty() */ + + +/***************************************************************************** + * Function: pin_entry() + * + * Purpose: Pin the entry indicated by the index. + * + * Do nothing if nerrors is non-zero on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 4/11/06 + * + * Modifications: + * + *****************************************************************************/ + +void +pin_entry(H5C_t * cache_ptr, + H5F_t * file_ptr, + int32_t idx, + hbool_t global, + hbool_t dirty) +{ + const char * fcn_name = "pin_entry()"; + unsigned int flags = H5AC__PIN_ENTRY_FLAG; + struct datum * entry_ptr; + + if ( nerrors == 0 ) { + + HDassert( cache_ptr ); + HDassert( file_ptr ); + HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); + + entry_ptr = &(data[idx]); + + HDassert ( ! (entry_ptr->global_pinned) ); + HDassert ( ! (entry_ptr->local_pinned) ); + HDassert ( ! ( dirty && ( ! global ) ) ); + + lock_entry(cache_ptr, file_ptr, idx); + + if ( dirty ) { + + flags |= H5AC__DIRTIED_FLAG; + } + + unlock_entry(cache_ptr, file_ptr, idx, flags); + + HDassert( (entry_ptr->header).is_pinned ); + HDassert( ( ! 
dirty ) || ( (entry_ptr->header).is_dirty ) ); + + if ( global ) { + + entry_ptr->global_pinned = TRUE; + + global_pins++; + + } else { + + entry_ptr->local_pinned = TRUE; + + local_pins++; + + } + } + + return; + +} /* pin_entry() */ + + +/***************************************************************************** * Function: rename_entry() * * Purpose: Rename the entry indicated old_idx to the entry indicated @@ -1809,7 +2446,6 @@ rename_entry(H5C_t * cache_ptr, const char * fcn_name = "rename_entry()"; herr_t result; int tmp; - hbool_t done = TRUE; /* will set to FALSE if we have work to do */ haddr_t old_addr = HADDR_UNDEF; haddr_t new_addr = HADDR_UNDEF; struct datum * old_entry_ptr; @@ -1820,7 +2456,9 @@ rename_entry(H5C_t * cache_ptr, HDassert( cache_ptr ); HDassert( file_ptr ); HDassert( ( 0 <= old_idx ) && ( old_idx < NUM_DATA_ENTRIES ) ); + HDassert( old_idx < virt_num_data_entries ); HDassert( ( 0 <= new_idx ) && ( new_idx < NUM_DATA_ENTRIES ) ); + HDassert( new_idx < virt_num_data_entries ); old_entry_ptr = &(data[old_idx]); new_entry_ptr = &(data[new_idx]); @@ -2159,6 +2797,7 @@ setup_rand(void) seed = (unsigned)tv.tv_usec; HDfprintf(stdout, "%d:%s: seed = %d.\n", world_mpi_rank, fcn_name, seed); + fflush(stdout); HDsrand(seed); } @@ -2258,6 +2897,7 @@ unlock_entry(H5C_t * cache_ptr, HDassert( cache_ptr ); HDassert( file_ptr ); HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); entry_ptr = &(data[idx]); @@ -2269,7 +2909,8 @@ unlock_entry(H5C_t * cache_ptr, entry_ptr->dirty = TRUE; } - result = H5AC_unprotect(file_ptr, -1, &(types[0]), entry_ptr->base_addr, + result = H5AC_unprotect(file_ptr, -1, &(types[0]), + entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags); if ( ( result < 0 ) || @@ -2299,6 +2940,98 @@ unlock_entry(H5C_t * cache_ptr, } /* unlock_entry() */ +/***************************************************************************** + * Function: unpin_entry() + * + * Purpose: Unpin the entry indicated by the index. + * + * Do nothing if nerrors is non-zero on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 4/12/06 + * + * Modifications: + * + *****************************************************************************/ + +void +unpin_entry(H5C_t * cache_ptr, + H5F_t * file_ptr, + int32_t idx, + hbool_t global, + hbool_t dirty, + hbool_t via_unprotect) +{ + const char * fcn_name = "unpin_entry()"; + herr_t result; + unsigned int flags = H5AC__UNPIN_ENTRY_FLAG; + struct datum * entry_ptr; + + if ( nerrors == 0 ) { + + HDassert( cache_ptr ); + HDassert( file_ptr ); + HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); + HDassert( idx < virt_num_data_entries ); + + entry_ptr = &(data[idx]); + + HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) ); + HDassert ( ( global && entry_ptr->global_pinned ) || + ( ! global && entry_ptr->local_pinned ) ); + HDassert ( ! ( dirty && ( ! global ) ) ); + + if ( via_unprotect ) { + + lock_entry(cache_ptr, file_ptr, idx); + + if ( dirty ) { + + flags |= H5AC__DIRTIED_FLAG; + } + + unlock_entry(cache_ptr, file_ptr, idx, flags); + + } else { + + if ( dirty ) { + + mark_pinned_entry_dirty(cache_ptr, idx, FALSE, (size_t)0); + + } + + result = H5AC_unpin_entry(cache_ptr, (void *)entry_ptr); + + if ( result < 0 ) { + + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", + world_mpi_rank, fcn_name); + } + } + } + + HDassert( ! 
((entry_ptr->header).is_pinned) ); + + if ( global ) { + + entry_ptr->global_pinned = FALSE; + + } else { + + entry_ptr->local_pinned = FALSE; + + } + } + + return; + +} /* unpin_entry() */ + + /*****************************************************************************/ /****************************** test functions *******************************/ /*****************************************************************************/ @@ -2336,6 +3069,7 @@ server_smoke_check(void) nerrors = 0; init_data(); + reset_stats(); if ( world_mpi_rank == world_server_mpi_rank ) { @@ -2512,6 +3246,7 @@ smoke_check_1(void) nerrors = 0; init_data(); + reset_stats(); if ( world_mpi_rank == world_server_mpi_rank ) { @@ -2538,31 +3273,33 @@ smoke_check_1(void) } } - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); } - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-- ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); } /* rename the first half of the entries... */ - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); } /* ...and then rename them back. */ - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-- ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); } if ( fid >= 0 ) { @@ -2599,7 +3336,6 @@ smoke_check_1(void) if ( success ) { - success = send_mssg(&mssg); if ( ! success ) { @@ -2652,7 +3388,11 @@ smoke_check_1(void) * * Modifications: * - * None. + * JRM -- 4/13/06 + * Added pinned entry tests. + * + * JRM -- 4/28/06 + * Modified test to rename pinned entries. 
* *****************************************************************************/ @@ -2675,6 +3415,7 @@ smoke_check_2(void) nerrors = 0; init_data(); + reset_stats(); if ( world_mpi_rank == world_server_mpi_rank ) { @@ -2701,7 +3442,7 @@ smoke_check_2(void) } } - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); @@ -2712,43 +3453,72 @@ smoke_check_2(void) } } - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=61 ) + { + /* Make sure we don't step on any locally pinned entries */ + if ( data[i].local_pinned ) { + unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE); + } + + pin_entry(cache_ptr, file_ptr, i, TRUE, FALSE); + } + + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 20), 0, 100); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 20), + 0, 100); + local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 4), + 0, 3); } - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i+=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 10), 0, 100); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 10), + 0, 100); } + /* we can't rename pinned entries, so release any local pins now. */ + local_unpin_all_entries(cache_ptr, file_ptr, FALSE); + /* rename the first half of the entries... */ - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, ((NUM_DATA_ENTRIES / 50) - 1), + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + ((virt_num_data_entries / 50) - 1), 0, 100); } /* ...and then rename them back. */ - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-- ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 100), 0, 100); + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 100), + 0, 100); } + for ( i = 0; i < (virt_num_data_entries / 2); i+=61 ) + { + hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 ); + hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 ); + + unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty, via_unprotect); + } + if ( fid >= 0 ) { if ( ! take_down_cache(fid) ) { @@ -2783,7 +3553,6 @@ smoke_check_2(void) if ( success ) { - success = send_mssg(&mssg); if ( ! success ) { @@ -2842,6 +3611,8 @@ smoke_check_2(void) * Added code intended to ensure correct operation with large * numbers of processors. * JRM - 1/31/06 + * + * Added pinned entry tests. 
JRM - 4/14/06 * *****************************************************************************/ @@ -2868,6 +3639,7 @@ smoke_check_3(void) nerrors = 0; init_data(); + reset_stats(); if ( world_mpi_rank == world_server_mpi_rank ) { @@ -2894,11 +3666,10 @@ smoke_check_3(void) } } - min_count = 100 / ((file_mpi_rank + 1) * (file_mpi_rank + 1)); max_count = min_count + 50; - for ( i = 0; i < (NUM_DATA_ENTRIES / 4); i++ ) + for ( i = 0; i < (virt_num_data_entries / 4); i++ ) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); @@ -2910,46 +3681,101 @@ smoke_check_3(void) } } + min_count = 100 / ((file_mpi_rank + 2) * (file_mpi_rank + 2)); max_count = min_count + 50; - for ( i = (NUM_DATA_ENTRIES / 4); i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = (virt_num_data_entries / 4); + i < (virt_num_data_entries / 2); + i++ ) { + insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + if ( i % 59 == 0 ) { + + hbool_t dirty = ( (i % 2) == 0); + + if ( data[i].local_pinned ) { + unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE); + } + + pin_entry(cache_ptr, file_ptr, i, TRUE, dirty); + + HDassert( !dirty || data[i].header.is_dirty ); + HDassert( data[i].header.is_pinned ); + HDassert( data[i].global_pinned ); + HDassert( ! data[i].local_pinned ); + } + if ( i > 100 ) { lock_and_unlock_random_entries(cache_ptr, file_ptr, (i - 100), i, min_count, max_count); } + + local_pin_and_unpin_random_entries(cache_ptr, file_ptr, + 0, virt_num_data_entries / 4, + 0, (file_mpi_rank + 2)); + + } + + + /* flush the file to be sure that we have no problems flushing + * pinned entries + */ + if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", + world_mpi_rank, fcn_name); + } } + min_idx = 0; - max_idx = ((NUM_DATA_ENTRIES / 10) / + max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1; if ( max_idx <= min_idx ) { max_idx = min_idx + 10; } - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-=2 ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { - lock_entry(cache_ptr, file_ptr, i); - unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - min_idx, max_idx, 0, 100); + if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) { + + hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 ); + hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 ); + + HDassert( data[i].global_pinned ); + HDassert( ! data[i].local_pinned ); + + unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty, + via_unprotect); + } + if ( i % 2 == 0 ) { + + lock_entry(cache_ptr, file_ptr, i); + unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0, + virt_num_data_entries / 2, + 0, 2); + lock_and_unlock_random_entries(cache_ptr, file_ptr, + min_idx, max_idx, 0, 100); + } } min_idx = 0; - max_idx = ((NUM_DATA_ENTRIES / 10) / + max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1; if ( max_idx <= min_idx ) { max_idx = min_idx + 10; } - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i+=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); @@ -2957,28 +3783,33 @@ smoke_check_3(void) min_idx, max_idx, 0, 100); } + /* we can't rename pinned entries, so release any local pins now. 
*/ + local_unpin_all_entries(cache_ptr, file_ptr, FALSE); + min_count = 10 / (file_mpi_rank + 1); max_count = min_count + 100; /* rename the first half of the entries... */ - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 20), + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 20), min_count, max_count); } /* ...and then rename them back. */ - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-- ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 40), + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 40), min_count, max_count); } @@ -2988,8 +3819,12 @@ smoke_check_3(void) min_count = 200 / ((file_mpi_rank + 1) * (file_mpi_rank + 1)); max_count = min_count + 100; - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i+=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) { + local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 2), + 0, 5); + lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); @@ -3001,6 +3836,9 @@ smoke_check_3(void) } } + /* release any local pins before we take down the cache. */ + local_unpin_all_entries(cache_ptr, file_ptr, FALSE); + if ( fid >= 0 ) { if ( ! take_down_cache(fid) ) { @@ -3120,6 +3958,7 @@ smoke_check_4(void) nerrors = 0; init_data(); + reset_stats(); if ( world_mpi_rank == world_server_mpi_rank ) { @@ -3150,7 +3989,7 @@ smoke_check_4(void) min_count = 100 * (file_mpi_rank % 4); max_count = min_count + 50; - for ( i = 0; i < (NUM_DATA_ENTRIES / 4); i++ ) + for ( i = 0; i < (virt_num_data_entries / 4); i++ ) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); @@ -3165,35 +4004,84 @@ smoke_check_4(void) min_count = 10 * (file_mpi_rank % 4); max_count = min_count + 100; - for ( i = (NUM_DATA_ENTRIES / 4); i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = (virt_num_data_entries / 4); + i < (virt_num_data_entries / 2); + i++ ) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + if ( i % 59 == 0 ) { + + hbool_t dirty = ( (i % 2) == 0); + + if ( data[i].local_pinned ) { + unpin_entry(cache_ptr, file_ptr, i, FALSE, FALSE, FALSE); + } + + pin_entry(cache_ptr, file_ptr, i, TRUE, dirty); + + HDassert( !dirty || data[i].header.is_dirty ); + HDassert( data[i].header.is_pinned ); + HDassert( data[i].global_pinned ); + HDassert( ! 
data[i].local_pinned ); + } + if ( i > 100 ) { lock_and_unlock_random_entries(cache_ptr, file_ptr, (i - 100), i, min_count, max_count); } + + local_pin_and_unpin_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 4), + 0, (file_mpi_rank + 2)); } + + /* flush the file to be sure that we have no problems flushing + * pinned entries + */ + if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", + world_mpi_rank, fcn_name); + } + } + + min_idx = 0; - max_idx = (((NUM_DATA_ENTRIES / 10) / 4) * + max_idx = (((virt_num_data_entries / 10) / 4) * ((file_mpi_rank % 4) + 1)) - 1; - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-=2 ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { - lock_entry(cache_ptr, file_ptr, i); - unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - min_idx, max_idx, 0, 100); + if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) { + + hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 ); + hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 ); + + HDassert( data[i].global_pinned ); + HDassert( ! data[i].local_pinned ); + + unpin_entry(cache_ptr, file_ptr, i, TRUE, dirty, via_unprotect); + } + + if ( i % 2 == 0 ) { + + lock_entry(cache_ptr, file_ptr, i); + unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + lock_and_unlock_random_entries(cache_ptr, file_ptr, + min_idx, max_idx, 0, 100); + } } min_idx = 0; - max_idx = (((NUM_DATA_ENTRIES / 10) / 8) * + max_idx = (((virt_num_data_entries / 10) / 8) * ((file_mpi_rank % 4) + 1)) - 1; - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i+=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); @@ -3201,28 +4089,33 @@ smoke_check_4(void) min_idx, max_idx, 0, 100); } + /* we can't rename pinned entries, so release any local pins now. */ + local_unpin_all_entries(cache_ptr, file_ptr, FALSE); + min_count = 10 * (file_mpi_rank % 4); max_count = min_count + 100; /* rename the first half of the entries... */ - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i++ ) + for ( i = 0; i < (virt_num_data_entries / 2); i++ ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 20), + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 20), min_count, max_count); } /* ...and then rename them back. 
*/ - for ( i = (NUM_DATA_ENTRIES / 2) - 1; i >= 0; i-- ) + for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); - rename_entry(cache_ptr, file_ptr, i, (i + (NUM_DATA_ENTRIES / 2))); - lock_and_unlock_random_entries(cache_ptr, file_ptr, - 0, (NUM_DATA_ENTRIES / 40), + rename_entry(cache_ptr, file_ptr, i, + (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(cache_ptr, file_ptr, 0, + (virt_num_data_entries / 40), min_count, max_count); } @@ -3232,7 +4125,7 @@ smoke_check_4(void) min_count = 100 * (file_mpi_rank % 4); max_count = min_count + 100; - for ( i = 0; i < (NUM_DATA_ENTRIES / 2); i+=2 ) + for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) { lock_entry(cache_ptr, file_ptr, i); unlock_entry(cache_ptr, file_ptr, i, H5AC__DIRTIED_FLAG); @@ -3337,10 +4230,10 @@ int main(int argc, char **argv) { const char * fcn_name = "main()"; + int express_test; int i; int mpi_size; int mpi_rank; - int ret_code; int max_nerrors; MPI_Init(&argc, &argv); @@ -3354,10 +4247,29 @@ main(int argc, char **argv) H5open(); + if ( express_test = do_express_test() ) { + +#if 0 /* I'll want this from time to time */ + HDfprintf(stdout, "%d:%s: Express test.\n", world_mpi_rank, fcn_name); +#endif + virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES; + + } else { + + virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES; + } + +#ifdef H5_HAVE_MPE + if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); } + virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES; +#endif /* H5_HAVE_MPE */ + + if (MAINPROCESS){ printf("===================================\n"); printf("Parallel metadata cache tests\n"); - printf(" mpi_size = %d\n", mpi_size); + printf(" mpi_size = %d\n", mpi_size); + printf(" express_test = %d\n", express_test); printf("===================================\n"); } @@ -3463,6 +4375,8 @@ main(int argc, char **argv) /* run the tests */ #if 1 server_smoke_check(); +#endif +#if 1 smoke_check_1(); #endif #if 1 -- cgit v0.12
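
For reference, the calling pattern exercised by the new test code is roughly
as sketched below.  These sketches are illustrative only and are not part of
the patch: the function names serial_pin_sketch() and parallel_pin_sketch()
are hypothetical, the entry type/index values are placeholders, and each
sketch assumes the corresponding test harness has already been set up (via
setup_cache() in test/cache_common.c, or setup_cache_for_test() in
testpar/t_cache.c).  The helper signatures are the ones declared in the
patch above.

/* Serial case (test/cache_common.c helpers): an entry is pinned by
 * unprotecting it with H5C__PIN_ENTRY_FLAG, may be dirtied while it
 * remains pinned, and is released either directly via unpin_entry()
 * or by a second protect/unprotect cycle with H5C__UNPIN_ENTRY_FLAG.
 */
static void
serial_pin_sketch(H5C_t * cache_ptr)
{
    /* entry type 0, index 0 -- placeholder values */
    protect_entry(cache_ptr, 0, 0);
    unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__PIN_ENTRY_FLAG);

    /* a pinned entry can be marked dirty without re-protecting it */
    mark_pinned_entry_dirty(cache_ptr, 0, 0, FALSE, (size_t)0);

    /* release the pin directly ... */
    unpin_entry(cache_ptr, 0, 0);

    /* ... or, equivalently, via another protect/unprotect cycle:
     *
     *     protect_entry(cache_ptr, 0, 0);
     *     unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__UNPIN_ENTRY_FLAG);
     */
}

/* Parallel case (testpar/t_cache.c helpers): pin_entry() locks the
 * target entry and unlocks it with H5AC__PIN_ENTRY_FLAG set.  Only
 * globally pinned entries may be marked dirty, since metadata writes
 * must be collective.  unpin_entry() releases the pin either directly
 * or, if via_unprotect is TRUE, through an unprotect carrying
 * H5AC__UNPIN_ENTRY_FLAG.
 */
static void
parallel_pin_sketch(H5C_t * cache_ptr, H5F_t * file_ptr)
{
    int idx = 0;    /* placeholder index */

    insert_entry(cache_ptr, file_ptr, idx, H5AC__NO_FLAGS_SET);

    /* pin collectively (global == TRUE), initially clean */
    pin_entry(cache_ptr, file_ptr, idx, TRUE, FALSE);

    /* dirty the pinned entry without a protect/unprotect cycle */
    mark_pinned_entry_dirty(cache_ptr, idx, FALSE, (size_t)0);

    /* unpin: global == TRUE, dirty == FALSE, via_unprotect == FALSE */
    unpin_entry(cache_ptr, file_ptr, idx, TRUE, FALSE, FALSE);
}

In both cases a pin can be requested as part of an ordinary unprotect, and a
pinned entry can be dirtied in place; this is the pattern the new smoke-check
code mixes with the pre-existing lock/unlock and rename traffic.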