path: root/test/cache_common.h
author    mainzer <mainzer#hdfgroup.org>  2020-08-05 20:39:49 (GMT)
committer mainzer <mainzer#hdfgroup.org>  2020-08-05 20:39:49 (GMT)
commit    33f35183cbfdde70ee8f803acb5b735ad4dfe086 (patch)
tree      e18d05c2c6b34f4baba1d4b28250dc4cf9d51171 /test/cache_common.h
parent    50f404c887118577034c6412aeaaa1f6db2fe475 (diff)
When flushing, the metadata cache attempts to flush entries in increasing address order. To facilitate this, the metadata cache needs a list of dirty entries in increasing address order. This is implemented via a skip list of all dirty entries in the cache. To date, this skip list has been maintained at all times. However, profiling indicates that we can avoid significant overhead by constructing the skip list of dirty entries just before a flush, taking it down afterwards, and not maintaining it during normal operation. This commit implements this optimization for both the serial and parallel cases.

Tested serial and parallel, debug and production, on charis and jelly.
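
As a rough sketch of the new call pattern (illustrative only: the function name example_flush is invented here, while H5C_set_slist_enabled(), H5C_flush_cache(), and the flush flag are the routines this change relies on, and the snippet assumes the package-private HDF5 headers that the cache tests already use), a flush now builds the skip list on demand and tears it down afterwards:

    static herr_t
    example_flush(H5F_t *f, unsigned flags)
    {
        H5C_t  *cache       = f->shared->cache;
        hbool_t clear_slist = (hbool_t)((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);

        /* construct the skip list of dirty entries just before the flush */
        if (H5C_set_slist_enabled(cache, TRUE, FALSE) < 0)
            return FAIL;

        /* flush dirty entries in increasing address order via the skip list */
        if (H5C_flush_cache(f, flags) < 0)
            return FAIL;

        /* take the skip list down again; after a partial flush it may be
         * non-empty, so ask H5C_set_slist_enabled() to clear it
         */
        if (H5C_set_slist_enabled(cache, FALSE, clear_slist) < 0)
            return FAIL;

        return SUCCEED;
    }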
Diffstat (limited to 'test/cache_common.h')
-rw-r--r--   test/cache_common.h   59
1 file changed, 59 insertions, 0 deletions
diff --git a/test/cache_common.h b/test/cache_common.h
index 785dc21..807ba35 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -134,6 +134,65 @@
(NOTIFY_ENTRY_SIZE * NUM_NOTIFY_ENTRIES))
#define ADDR_SPACE_SIZE (haddr_t)(MAX_ADDR - BASE_ADDR)
+
+/***********************************************************************
+ *
+ * Macro: H5C_FLUSH_CACHE
+ *
+ * Purpose: Wrap a call to H5C_flush_cache() in calls to
+ * H5C_set_slist_enabled() to set up and take down the slist.
+ *
+ * This is necessary, as H5C_flush_cache() needs the
+ * slist to be active. Further, since it is called
+ * repeatedly during file flush, it would be inefficient
+ * for it to set up the slist on entry and take it down
+ * on exit.
+ *
+ * Note that the slist need not be empty if the flags
+ * indicate a partial flush (i.e.
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG). Compute clear_slist
+ * and pass it into H5C_set_slist_enabled as appropriate.
+ *
+ * On error, set pass to FALSE, and set failure_mssg
+ * to the supplied error message.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer
+ * 5/14/20
+ *
+ * Changes: None.
+ *
+ ***********************************************************************/
+
+#define H5C_FLUSH_CACHE(file, flags, fail_mssg) \
+{ \
+ hbool_t clear_slist; \
+ herr_t rslt; \
+ \
+ clear_slist = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 ); \
+ \
+ rslt = H5C_set_slist_enabled((file)->shared->cache, TRUE, FALSE); \
+ \
+ if ( rslt >= 0 ) { \
+ \
+ rslt = H5C_flush_cache((file), (flags)); \
+ } \
+ \
+ if ( rslt >= 0 ) { \
+ \
+ rslt = H5C_set_slist_enabled((file)->shared->cache, FALSE, \
+ clear_slist); \
+ } \
+ \
+ if( rslt < 0 ) { \
+ \
+ pass = FALSE; \
+ failure_mssg = (fail_mssg); \
+ } \
+} /* H5C_FLUSH_CACHE */
+
+
#define MAX_PINS 8 /* Maximum number of entries that can be
* directly pinned by a single entry.
*/
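
For reference, test code would invoke the new macro roughly as follows (a minimal sketch: the variable file_ptr and the message text are illustrative, while pass and failure_mssg are the cache test framework's existing globals that the macro updates on error):

    if (pass) {

        /* the macro expands to a brace-enclosed block, so no trailing
         * semicolon is needed
         */
        H5C_FLUSH_CACHE(file_ptr, H5C__NO_FLAGS_SET, "dummy flush failed")
    }

    if (!pass)
        HDfprintf(stdout, "flush failed: %s\n", failure_mssg);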