-rw-r--r--  src/H5FD.c  19
-rw-r--r--  src/H5HL.c  35
2 files changed, 44 insertions, 10 deletions
diff --git a/src/H5FD.c b/src/H5FD.c
index e9974ed..eec94fa 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -1203,10 +1203,10 @@ static herr_t
H5FD_free_freelist(H5FD_t *file)
{
H5FD_mem_t i;
-#ifdef H5F_DEBUG
+#ifdef H5FD_ALLOC_DEBUG
unsigned nblocks = 0;
hsize_t nbytes = 0;
-#endif /* H5F_DEBUG */
+#endif /* H5FD_ALLOC_DEBUG */
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5FD_free_freelist)
@@ -1222,10 +1222,10 @@ H5FD_free_freelist(H5FD_t *file)
H5FD_free_t *cur, *next;
for( cur = file->fl[i]; cur; cur = next) {
-#ifdef H5F_DEBUG
+#ifdef H5FD_ALLOC_DEBUG
++nblocks;
nbytes += cur->size;
-#endif /* H5F_DEBUG */
+#endif /* H5FD_ALLOC_DEBUG */
next = cur->next;
H5FL_FREE(H5FD_free_t, cur);
} /* end for */
@@ -1233,12 +1233,11 @@ H5FD_free_freelist(H5FD_t *file)
file->fl[i] = NULL;
} /* end for */
-#ifdef H5F_DEBUG
- if(nblocks && H5DEBUG(F))
- HDfprintf(H5DEBUG(F),
- "H5F: leaked %Hu bytes of file memory in %u blocks\n",
- nbytes, nblocks);
-#endif /* H5F_DEBUG */
+#ifdef H5FD_ALLOC_DEBUG
+ if(nblocks)
+ HDfprintf(stderr, "%s: leaked %Hu bytes of file memory in %u blocks\n",
+ "H5FD_free_freelist", nbytes, nblocks);
+#endif /* H5FD_ALLOC_DEBUG */
/* Check if we need to reset the metadata accumulator information */
if(file->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) {
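
The H5FD.c hunks above move the leak accounting from the general H5F_DEBUG output stream to a dedicated H5FD_ALLOC_DEBUG report on stderr: walk each per-type free list, tally any blocks still outstanding, release every node, and print what leaked. The standalone sketch below illustrates that pattern outside the library, under stated assumptions: free_block_t and free_freelist are illustrative names, and plain malloc/free stand in for HDF5's H5FL free-list package.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for H5FD_free_t: one node per freed file region. */
typedef struct free_block_t {
    size_t               size;   /* size of the freed region, in bytes */
    struct free_block_t *next;   /* next entry on the free list        */
} free_block_t;

/* Release every node on the list and report anything that leaked. */
static void
free_freelist(free_block_t *head)
{
    unsigned      nblocks = 0;
    size_t        nbytes  = 0;
    free_block_t *cur, *next;

    for(cur = head; cur; cur = next) {
        nblocks++;               /* count blocks still on the list */
        nbytes += cur->size;     /* and the bytes they cover       */
        next = cur->next;
        free(cur);
    }

    if(nblocks)
        fprintf(stderr, "free_freelist: leaked %zu bytes of file memory in %u blocks\n",
                nbytes, nblocks);
}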
diff --git a/src/H5HL.c b/src/H5HL.c
index c861978..5d7b6f8 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -963,6 +963,41 @@ H5HL_insert(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t buf_size, const void *
need_more = need_size;
new_heap_alloc = heap->heap_alloc + need_more;
+/*
+ * XXX: This is a _total_ hack, a real kludge. :-/ The metadata cache currently
+ * responds very poorly when an object is inserted into the cache (or
+ * resized) that is larger than the current cache size. It waits through
+ * an entire 'epoch' of cache operations to resize the cache larger (getting
+ * _very_ poor performance), instead of immediately accommodating the large
+ * object by increasing the cache size.
+ *
+ * So, what we are doing here is to look at the current cache size, check
+ * if the new local heap will overwhelm the cache and, if so, resize the
+ * cache to be large enough to hold the new local heap block along with
+ * leaving room for other objects in the cache.
+ *
+ * John will be working on a fix inside the cache itself, so this special
+ * case code here can be removed when he's finished. - QAK, 2007/12/21
+ */
+{
+ H5AC_cache_config_t mdc_config;
+
+ /* Retrieve the current cache information */
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5AC_get_cache_auto_resize_config(f->shared->cache, &mdc_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (size_t)-1, "H5AC_get_cache_auto_resize_config() failed.")
+
+ /* Check if the current cache will get blown out by adding this heap
+ * block and resize it if so.
+ */
+ if((2 * new_heap_alloc) >= mdc_config.initial_size) {
+ mdc_config.set_initial_size = TRUE;
+ mdc_config.initial_size = 2 * new_heap_alloc;
+
+ if(H5AC_set_cache_auto_resize_config(f->shared->cache, &mdc_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (size_t)-1, "H5AC_set_cache_auto_resize_config() failed.")
+ } /* end if */
+}
HDassert(heap->heap_alloc < new_heap_alloc);
H5_CHECK_OVERFLOW(heap->heap_alloc, size_t, hsize_t);
H5_CHECK_OVERFLOW(new_heap_alloc, size_t, hsize_t);
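
The temporary workaround in the H5HL.c hunk calls the internal H5AC_get_cache_auto_resize_config / H5AC_set_cache_auto_resize_config routines directly. A minimal sketch of the same check written against the public metadata-cache API (H5Fget_mdc_config / H5Fset_mdc_config), which applications can call, is shown below; file_id and new_heap_alloc are assumed to be supplied by the caller, and the doubling heuristic simply mirrors the patch rather than being a recommended policy.

#include "hdf5.h"

/* Sketch: if a new heap block would take up half or more of the metadata
 * cache's initial size, double the cache so it can absorb the block
 * immediately instead of waiting for an auto-resize epoch.
 */
static herr_t
bump_mdc_for_heap(hid_t file_id, size_t new_heap_alloc)
{
    H5AC_cache_config_t mdc_config;

    /* The version field must be set before querying the configuration */
    mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if(H5Fget_mdc_config(file_id, &mdc_config) < 0)
        return -1;

    /* Same check as the patch: resize when the block would overwhelm the cache */
    if((2 * new_heap_alloc) >= mdc_config.initial_size) {
        mdc_config.set_initial_size = 1;            /* TRUE */
        mdc_config.initial_size     = 2 * new_heap_alloc;

        if(H5Fset_mdc_config(file_id, &mdc_config) < 0)
            return -1;
    }

    return 0;
}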