author      Jordan Henderson <jhenderson@hdfgroup.org>   2017-03-10 15:05:36 (GMT)
committer   Jordan Henderson <jhenderson@hdfgroup.org>   2017-03-10 15:05:36 (GMT)
commit      281c24a5a91b16eaf498e5dd5af783773c228758 (patch)
tree        4777cd13a47ce3bc7a043090598c0682b9372ee7 /src
parent      104d63f25a8cb6e156bc901eb85aafc0b67775ee (diff)
Revert "Merge latest changes from develop"
This reverts commit 104d63f25a8cb6e156bc901eb85aafc0b67775ee.
Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt   |   40
-rw-r--r--  src/H5AC.c           |  308
-rw-r--r--  src/H5ACdbg.c        |  203
-rw-r--r--  src/H5ACmpio.c       |   80
-rw-r--r--  src/H5ACpkg.h        |   13
-rw-r--r--  src/H5ACprivate.h    |   69
-rw-r--r--  src/H5ACpublic.h     |   61
-rw-r--r--  src/H5B2pkg.h        |    9
-rw-r--r--  src/H5Bpkg.h         |    3
-rw-r--r--  src/H5C.c            | 1848
-rw-r--r--  src/H5Cdbg.c         |  467
-rw-r--r--  src/H5Cepoch.c       |    5
-rw-r--r--  src/H5Cmpio.c        |  377
-rw-r--r--  src/H5Cpkg.h         |  564
-rw-r--r--  src/H5Cprivate.h     |  481
-rw-r--r--  src/H5Cquery.c       |    5
-rw-r--r--  src/H5EApkg.h        |   15
-rw-r--r--  src/H5F.c            |   74
-rw-r--r--  src/H5FApkg.h        |    9
-rw-r--r--  src/H5FDmulti.c      |   14
-rw-r--r--  src/H5FSpkg.h        |    6
-rw-r--r--  src/H5Fint.c         |   67
-rw-r--r--  src/H5Fio.c          |    2
-rw-r--r--  src/H5Fpkg.h         |   12
-rw-r--r--  src/H5Fprivate.h     |    4
-rw-r--r--  src/H5Fpublic.h      |    1
-rw-r--r--  src/H5Fsuper.c       |   58
-rw-r--r--  src/H5Gpkg.h         |    3
-rw-r--r--  src/H5HFcache.c      |  684
-rw-r--r--  src/H5HFpkg.h        |    9
-rw-r--r--  src/H5HGpkg.h        |    3
-rw-r--r--  src/H5HLpkg.h        |    6
-rw-r--r--  src/H5MF.c           |  193
-rw-r--r--  src/H5MFaggr.c       |   15
-rw-r--r--  src/H5MFprivate.h    |    8
-rw-r--r--  src/H5O.c            |    7
-rw-r--r--  src/H5Opkg.h         |   13
-rw-r--r--  src/H5Oprivate.h     |   16
-rw-r--r--  src/H5Pfapl.c        |  255
-rw-r--r--  src/H5Ppublic.h      |    2
-rw-r--r--  src/H5SMpkg.h        |    2
-rw-r--r--  src/H5win32defs.h    |    4
-rw-r--r--  src/Makefile.am      |    6
43 files changed, 1627 insertions(+), 4394 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index ef361b2..55de5ea 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.2.2)
+cmake_minimum_required (VERSION 3.1.0)
PROJECT (HDF5_SRC C CXX)
#-----------------------------------------------------------------------------
@@ -90,10 +90,8 @@ set (H5C_SOURCES
${HDF5_SRC_DIR}/H5C.c
${HDF5_SRC_DIR}/H5Cdbg.c
${HDF5_SRC_DIR}/H5Cepoch.c
- ${HDF5_SRC_DIR}/H5Cimage.c
${HDF5_SRC_DIR}/H5Clog.c
${HDF5_SRC_DIR}/H5Cmpio.c
- ${HDF5_SRC_DIR}/H5Cprefetched.c
${HDF5_SRC_DIR}/H5Cquery.c
${HDF5_SRC_DIR}/H5Ctag.c
${HDF5_SRC_DIR}/H5Ctest.c
@@ -441,7 +439,6 @@ set (H5O_SOURCES
${HDF5_SRC_DIR}/H5Obogus.c
${HDF5_SRC_DIR}/H5Obtreek.c
${HDF5_SRC_DIR}/H5Ocache.c
- ${HDF5_SRC_DIR}/H5Ocache_image.c
${HDF5_SRC_DIR}/H5Ochunk.c
${HDF5_SRC_DIR}/H5Ocont.c
${HDF5_SRC_DIR}/H5Ocopy.c
@@ -661,7 +658,7 @@ set (H5Z_SOURCES
if (H5_ZLIB_HEADER)
SET_PROPERTY(SOURCE ${HDF5_SRC_DIR}/H5Zdeflate.c PROPERTY
COMPILE_DEFINITIONS H5_ZLIB_HEADER="${H5_ZLIB_HEADER}")
-endif ()
+endif (H5_ZLIB_HEADER)
set (H5Z_HDRS
@@ -819,10 +816,10 @@ if (HDF5_GENERATE_HEADERS)
COMMAND ${PERL_EXECUTABLE} ${HDF5_SOURCE_DIR}/bin/make_overflow ${HDF5_SRC_DIR}/H5overflow.txt OUTPUT_VARIABLE SCRIPT_OUTPUT
)
message(STATUS ${SCRIPT_OUTPUT})
- else ()
+ else (PERL_FOUND)
message (STATUS "Cannot generate headers - perl not found")
- endif ()
-endif ()
+ endif (PERL_FOUND)
+endif (HDF5_GENERATE_HEADERS)
#-----------------------------------------------------------------------------
# Setup the H5Detect utility which generates H5Tinit with platform
@@ -832,7 +829,7 @@ add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
TARGET_C_PROPERTIES (H5detect STATIC " " " ")
if (MSVC OR MINGW)
target_link_libraries (H5detect "ws2_32.lib")
-endif ()
+endif (MSVC OR MINGW)
set (CMD $<TARGET_FILE:H5detect>)
add_custom_command (
@@ -846,7 +843,7 @@ add_executable (H5make_libsettings ${HDF5_SRC_DIR}/H5make_libsettings.c)
TARGET_C_PROPERTIES (H5make_libsettings STATIC " " " ")
if (MSVC OR MINGW)
target_link_libraries (H5make_libsettings "ws2_32.lib")
-endif ()
+endif (MSVC OR MINGW)
set (CMD $<TARGET_FILE:H5make_libsettings>)
add_custom_command (
@@ -867,10 +864,10 @@ TARGET_C_PROPERTIES (${HDF5_LIB_TARGET} STATIC " " " ")
target_link_libraries (${HDF5_LIB_TARGET} ${LINK_LIBS})
if (NOT WIN32)
target_link_libraries (${HDF5_LIB_TARGET} dl)
-endif ()
+endif (NOT WIN32)
if (H5_HAVE_PARALLEL AND MPI_C_FOUND)
target_link_libraries (${HDF5_LIB_TARGET} ${MPI_C_LIBRARIES})
-endif ()
+endif (H5_HAVE_PARALLEL AND MPI_C_FOUND)
set_global_variable (HDF5_LIBRARIES_TO_EXPORT ${HDF5_LIB_TARGET})
H5_SET_LIB_OPTIONS (${HDF5_LIB_TARGET} ${HDF5_LIB_NAME} STATIC)
set_target_properties (${HDF5_LIB_TARGET} PROPERTIES
@@ -884,7 +881,7 @@ if (HDF5_ENABLE_DEBUG_APIS)
COMPILE_DEFINITIONS
"H5Z_DEBUG;H5T_DEBUG;H5ST_DEBUG;H5S_DEBUG;H5O_DEBUG;H5I_DEBUG;H5HL_DEBUG;H5F_DEBUG;H5D_DEBUG;H5B2_DEBUG;H5AC_DEBUG"
)
-endif ()
+endif (HDF5_ENABLE_DEBUG_APIS)
set (install_targets ${HDF5_LIB_TARGET})
if (BUILD_SHARED_LIBS)
@@ -911,10 +908,10 @@ if (BUILD_SHARED_LIBS)
target_link_libraries (${HDF5_LIBSH_TARGET} ${LINK_SHARED_LIBS})
if (NOT WIN32)
target_link_libraries (${HDF5_LIBSH_TARGET} dl)
- endif ()
+ endif (NOT WIN32)
if (H5_HAVE_PARALLEL AND MPI_C_FOUND)
target_link_libraries (${HDF5_LIBSH_TARGET} ${MPI_C_LIBRARIES})
- endif ()
+ endif (H5_HAVE_PARALLEL AND MPI_C_FOUND)
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_LIBSH_TARGET}")
H5_SET_LIB_OPTIONS (${HDF5_LIBSH_TARGET} ${HDF5_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
set_target_properties (${HDF5_LIBSH_TARGET} PROPERTIES
@@ -929,16 +926,16 @@ if (BUILD_SHARED_LIBS)
"H5_HAVE_THREADSAFE"
)
target_link_libraries (${HDF5_LIBSH_TARGET} Threads::Threads)
- endif ()
+ endif (HDF5_ENABLE_THREADSAFE)
if (HDF5_ENABLE_DEBUG_APIS)
set_property (TARGET ${HDF5_LIBSH_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS
"H5Z_DEBUG;H5T_DEBUG;H5ST_DEBUG;H5S_DEBUG;H5O_DEBUG;H5I_DEBUG;H5HL_DEBUG;H5F_DEBUG;H5D_DEBUG;H5B2_DEBUG;H5AC_DEBUG"
)
- endif ()
+ endif (HDF5_ENABLE_DEBUG_APIS)
set (install_targets ${install_targets} ${HDF5_LIBSH_TARGET})
-endif ()
+endif (BUILD_SHARED_LIBS)
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -953,7 +950,7 @@ if (NOT HDF5_INSTALL_NO_DEVELOPMENT)
COMPONENT
headers
)
-endif ()
+endif (NOT HDF5_INSTALL_NO_DEVELOPMENT)
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
@@ -961,8 +958,7 @@ endif ()
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries)
- endif ()
- INSTALL_TARGET_PDB (${HDF5_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries)
+ endif (BUILD_SHARED_LIBS)
install (
TARGETS
@@ -975,4 +971,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT libraries
INCLUDES DESTINATION include
)
-endif ()
+endif (HDF5_EXPORTED_TARGETS)
diff --git a/src/H5AC.c b/src/H5AC.c
index ee68a6f..117e662 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -107,43 +107,38 @@ hbool_t H5_coll_api_sanity_check_g = false;
/* Local Variables */
/*******************/
-/* Metadata entry class list */
-
-/* Remember to add new type ID to the H5AC_type_t enum in H5ACprivate.h when
- * adding a new class.
- */
-
-static const H5AC_class_t *const H5AC_class_s[] = {
- H5AC_BT, /* ( 0) B-tree nodes */
- H5AC_SNODE, /* ( 1) symbol table nodes */
- H5AC_LHEAP_PRFX, /* ( 2) local heap prefix */
- H5AC_LHEAP_DBLK, /* ( 3) local heap data block */
- H5AC_GHEAP, /* ( 4) global heap */
- H5AC_OHDR, /* ( 5) object header */
- H5AC_OHDR_CHK, /* ( 6) object header chunk */
- H5AC_BT2_HDR, /* ( 7) v2 B-tree header */
- H5AC_BT2_INT, /* ( 8) v2 B-tree internal node */
- H5AC_BT2_LEAF, /* ( 9) v2 B-tree leaf node */
- H5AC_FHEAP_HDR, /* (10) fractal heap header */
- H5AC_FHEAP_DBLOCK, /* (11) fractal heap direct block */
- H5AC_FHEAP_IBLOCK, /* (12) fractal heap indirect block */
- H5AC_FSPACE_HDR, /* (13) free space header */
- H5AC_FSPACE_SINFO, /* (14) free space sections */
- H5AC_SOHM_TABLE, /* (15) shared object header message master table */
- H5AC_SOHM_LIST, /* (16) shared message index stored as a list */
- H5AC_EARRAY_HDR, /* (17) extensible array header */
- H5AC_EARRAY_IBLOCK, /* (18) extensible array index block */
- H5AC_EARRAY_SBLOCK, /* (19) extensible array super block */
- H5AC_EARRAY_DBLOCK, /* (20) extensible array data block */
- H5AC_EARRAY_DBLK_PAGE, /* (21) extensible array data block page */
- H5AC_FARRAY_HDR, /* (22) fixed array header */
- H5AC_FARRAY_DBLOCK, /* (23) fixed array data block */
- H5AC_FARRAY_DBLK_PAGE, /* (24) fixed array data block page */
- H5AC_SUPERBLOCK, /* (25) file superblock */
- H5AC_DRVRINFO, /* (26) driver info block (supplements superblock) */
- H5AC_EPOCH_MARKER, /* (27) epoch marker - always internal to cache */
- H5AC_PROXY_ENTRY, /* (28) cache entry proxy */
- H5AC_PREFETCHED_ENTRY /* (29) prefetched entry - always internal to cache */
+static const char *H5AC_entry_type_names[H5AC_NTYPES] =
+{
+ "B-tree nodes",
+ "symbol table nodes",
+ "local heap prefixes",
+ "local heap data blocks",
+ "global heaps",
+ "object headers",
+ "object header chunks",
+ "v2 B-tree headers",
+ "v2 B-tree internal nodes",
+ "v2 B-tree leaf nodes",
+ "fractal heap headers",
+ "fractal heap direct blocks",
+ "fractal heap indirect blocks",
+ "free space headers",
+ "free space sections",
+ "shared OH message master table",
+ "shared OH message index",
+ "extensible array headers",
+ "extensible array index blocks",
+ "extensible array super blocks",
+ "extensible array data blocks",
+ "extensible array data block pages",
+ "fixed array headers",
+ "fixed array data block",
+ "fixed array data block pages",
+ "superblock",
+ "driver info",
+ "epoch marker", /* internal to cache only */
+ "proxy entry",
+ "test entry" /* for testing only -- not used for actual files */
};
@@ -371,13 +366,12 @@ H5AC_term_package(void)
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_config_t * image_config_ptr)
+H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
{
#ifdef H5_HAVE_PARALLEL
char prefix[H5C__PREFIX_LEN] = "";
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
- struct H5C_cache_image_ctl_t int_ci_config = H5C__DEFAULT_CACHE_IMAGE_CTL;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -386,16 +380,11 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
HDassert(f);
HDassert(NULL == f->shared->cache);
HDassert(config_ptr != NULL) ;
- HDassert(image_config_ptr != NULL) ;
- HDassert(image_config_ptr->version == H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION);
- HDcompile_assert(NELMTS(H5AC_class_s) == H5AC_NTYPES);
+ HDcompile_assert(NELMTS(H5AC_entry_type_names) == H5AC_NTYPES);
HDcompile_assert(H5C__MAX_NUM_TYPE_IDS == H5AC_NTYPES);
- /* Validate configurations */
if(H5AC_validate_config(config_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration")
- if(H5AC_validate_cache_image_config(image_config_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache image configuration")
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
@@ -413,7 +402,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure.")
aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
aux_ptr->mpi_comm = mpi_comm;
@@ -437,16 +426,15 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
aux_ptr->candidate_slist_ptr = NULL;
aux_ptr->write_done = NULL;
aux_ptr->sync_point_done = NULL;
- aux_ptr->p0_image_len = 0;
sprintf(prefix, "%d:", mpi_rank);
if(mpi_rank == 0) {
if(NULL == (aux_ptr->d_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create dirtied entry list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create dirtied entry list.")
if(NULL == (aux_ptr->c_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list.")
} /* end if */
/* construct the candidate slist for all processes.
@@ -454,25 +442,25 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
* will use it in the case of a flush.
*/
if(NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list.")
if(aux_ptr != NULL)
if(aux_ptr->mpi_rank == 0)
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s,
+ (const char **)H5AC_entry_type_names,
H5AC__check_if_write_permitted, TRUE, H5AC__log_flushed_entry,
(void *)aux_ptr);
else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s,
+ (const char **)H5AC_entry_type_names,
H5AC__check_if_write_permitted, TRUE, NULL,
(void *)aux_ptr);
else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s,
+ (const char **)H5AC_entry_type_names,
H5AC__check_if_write_permitted, TRUE, NULL, NULL);
} /* end if */
else {
@@ -483,7 +471,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
*/
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s,
+ (const char **)H5AC_entry_type_names,
H5AC__check_if_write_permitted, TRUE, NULL, NULL);
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -512,20 +500,6 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
- /* don't need to get the current H5C image config here since the
- * cache has just been created, and thus f->shared->cache->image_ctl
- * must still set to its initial value (H5C__DEFAULT_CACHE_IMAGE_CTL).
- * Note that this not true as soon as control returns to the application
- * program, as some test code modifies f->shared->cache->image_ctl.
- */
- int_ci_config.version = image_config_ptr->version;
- int_ci_config.generate_image = image_config_ptr->generate_image;
- int_ci_config.save_resize_status = image_config_ptr->save_resize_status;
- int_ci_config.entry_ageout = image_config_ptr->entry_ageout;
-
- if(H5C_set_cache_image_config(f, f->shared->cache, &int_ci_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
-
done:
#ifdef H5_HAVE_PARALLEL
/* if there is a failure, try to tidy up the auxilary structure */
@@ -583,7 +557,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#if H5AC__TRACE_FILE_ENABLED
if(H5AC__close_trace_file(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed.")
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5F_USE_MDC_LOGGING(f)) {
@@ -597,7 +571,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
/* destroying the cache, so clear all collective entries */
if(H5C_clear_coll_entries(f->shared->cache, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
if(aux_ptr)
@@ -606,7 +580,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
if(H5AC__flush_entries(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
@@ -616,18 +590,12 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
if(aux_ptr != NULL) {
- if(aux_ptr->d_slist_ptr != NULL) {
- HDassert(H5SL_count(aux_ptr->d_slist_ptr) == 0);
+ if(aux_ptr->d_slist_ptr != NULL)
H5SL_close(aux_ptr->d_slist_ptr);
- } /* end if */
- if(aux_ptr->c_slist_ptr != NULL) {
- HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
+ if(aux_ptr->c_slist_ptr != NULL)
H5SL_close(aux_ptr->c_slist_ptr);
- } /* end if */
- if(aux_ptr->candidate_slist_ptr != NULL) {
- HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
+ if(aux_ptr->candidate_slist_ptr != NULL)
H5SL_close(aux_ptr->candidate_slist_ptr);
- } /* end if */
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
} /* end if */
@@ -738,7 +706,7 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5C_expunge_entry(f, dxpl_id, type, addr, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed.")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -808,17 +776,17 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
/* flushing the cache, so clear all collective entries */
if(H5C_clear_coll_entries(f->shared->cache, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
if(H5AC__flush_entries(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
/* Flush the cache */
/* (Again, in parallel - writes out the superblock) */
if(H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -872,11 +840,11 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
FUNC_ENTER_NOAPI(FAIL)
if((f == NULL) || (!H5F_addr_defined(addr)) || (status == NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
if(H5C_get_entry_status(f, addr, NULL, &in_cache, &is_dirty,
&is_protected, &is_pinned, &is_corked, &is_flush_dep_parent, &is_flush_dep_child, &image_is_up_to_date) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
if(in_cache) {
*status |= H5AC_ES__IN_CACHE;
@@ -990,7 +958,7 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
/* Check if we should try to flush */
if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -1010,41 +978,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5AC_load_cache_image_on_next_protect
- *
- * Purpose: Load the cache image block at the specified location,
- * decode it, and insert its contents into the metadata
- * cache.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 7/6/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5AC_load_cache_image_on_next_protect(H5F_t * f, haddr_t addr, hsize_t len,
- hbool_t rw)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
-
- if(H5C_load_cache_image_on_next_protect(f, addr, len, rw) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "call to H5C_load_cache_image_on_next_protect failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_load_cache_image_on_next_protect() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5AC_mark_entry_dirty
*
* Purpose: Mark a pinned or protected entry as dirty. The target
@@ -1397,13 +1330,13 @@ H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
if(H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "H5C_move_entry() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "H5C_move_entry() failed.")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1493,7 +1426,7 @@ done:
* Function: H5AC_prep_for_file_close
*
* Purpose: This function should be called just prior to the cache
- * flushes at file close.
+ * flushes at file close.
*
* The objective of the call is to allow the metadata cache
* to do any preparatory work prior to generation of a
@@ -1575,7 +1508,7 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
/* Create the flush dependency */
if(H5C_create_flush_dependency(parent_thing, child_thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "H5C_create_flush_dependency() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "H5C_create_flush_dependency() failed.")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -1676,7 +1609,7 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
if(NULL == (thing = H5C_protect(f, dxpl_id, type, addr, udata, flags)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
#if H5AC__TRACE_FILE_ENABLED
if(trace_file_ptr != NULL)
@@ -1899,7 +1832,7 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
/* Destroy the flush dependency */
if(H5C_destroy_flush_dependency(parent_thing, child_thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "H5C_destroy_flush_dependency() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "H5C_destroy_flush_dependency() failed.")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -2023,18 +1956,18 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
if(deleted && aux_ptr->mpi_rank == 0)
if(H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed.")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
if(H5C_unprotect(f, dxpl_id, addr, thing, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed.")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold))
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
#endif /* H5_HAVE_PARALLEL */
done:
@@ -2077,22 +2010,22 @@ H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr,
/* Check args */
if((cache_ptr == NULL) || (config_ptr == NULL) ||
(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry.")
#ifdef H5_HAVE_PARALLEL
{
H5AC_aux_t *aux_ptr;
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
if((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry.")
}
#endif /* H5_HAVE_PARALLEL */
/* Retrieve the configuration */
if(H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed.")
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed.")
/* Set the information to return */
if(internal_config.rpt_fcn == NULL)
@@ -2161,7 +2094,7 @@ done:
*/
herr_t
H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
- size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr)
+ size_t *cur_size_ptr, int32_t *cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -2169,7 +2102,7 @@ H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_s
if(H5C_get_cache_size((H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr,
cur_size_ptr, cur_num_entries_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2196,7 +2129,7 @@ H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr)
FUNC_ENTER_NOAPI(FAIL)
if(H5C_get_cache_hit_rate((H5C_t *)cache_ptr, hit_rate_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_hit_rate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_hit_rate() failed.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2223,7 +2156,7 @@ H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
if(H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2272,14 +2205,14 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
#endif /* H5AC__TRACE_FILE_ENABLED */
if(cache_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry.")
#ifdef H5_HAVE_PARALLEL
{
H5AC_aux_t *aux_ptr;
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
if((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry.")
}
#endif /* H5_HAVE_PARALLEL */
@@ -2291,29 +2224,29 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
FILE * file_ptr;
if(NULL == (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
if((!(config_ptr->close_trace_file)) && (file_ptr != NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open.")
} /* end if */
/* Close & reopen trace file, if requested */
if(config_ptr->close_trace_file)
if(H5AC__close_trace_file(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed.")
if(config_ptr->open_trace_file)
if(H5AC__open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC__open_trace_file() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC__open_trace_file() failed.")
/* Convert external configuration to internal representation */
if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
/* Set configuration */
if(H5C_set_cache_auto_resize_config(cache_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed.")
if(H5C_set_evictions_enabled(cache_ptr, config_ptr->evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed.")
#ifdef H5_HAVE_PARALLEL
{
@@ -2412,9 +2345,9 @@ H5AC_validate_config(H5AC_cache_config_t *config_ptr)
/* Check args */
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
if(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version.")
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
if(config_ptr->open_trace_file) {
@@ -2426,31 +2359,31 @@ H5AC_validate_config(H5AC_cache_config_t *config_ptr)
*/
name_len = HDstrlen(config_ptr->trace_file_name);
if(name_len == 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
} /* end if */
if((config_ptr->evictions_enabled == FALSE) &&
((config_ptr->incr_mode != H5C_incr__off) ||
(config_ptr->flash_incr_mode != H5C_flash_incr__off) ||
(config_ptr->decr_mode != H5C_decr__off)))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled.")
if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
if((config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY) &&
(config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
if(H5C_validate_resize_config(&internal_config, H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2458,61 +2391,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5AC_validate_cache_image_config()
- *
- * Purpose: Run a sanity check on the contents of the supplied
- * instance of H5AC_cache_image_config_t.
- *
- * Do nothing and return SUCCEED if no errors are detected,
- * and flag an error and return FAIL otherwise.
- *
- * At present, this function operates by packing the data
- * from the instance of H5AC_cache_image_config_t into an
- * instance of H5C_cache_image_ctl_t, and then calling
- * H5C_validate_cache_image_config(). If and when
- * H5AC_cache_image_config_t and H5C_cache_image_ctl_t
- * diverge, we may have to change this.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 6/25/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr)
-{
- H5C_cache_image_ctl_t internal_config = H5C__DEFAULT_CACHE_IMAGE_CTL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry")
-
- if(config_ptr->version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown image config version")
-
- /* don't need to get the current H5C image config here since the
- * default values of fields not in the H5AC config will always be
- * valid.
- */
- internal_config.version = config_ptr->version;
- internal_config.generate_image = config_ptr->generate_image;
- internal_config.save_resize_status = config_ptr->save_resize_status;
- internal_config.entry_ageout = config_ptr->entry_ageout;
-
- if(H5C_validate_cache_image_config(&internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new cache image config")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_validate_cache_image_config() */
-
-
-/*-------------------------------------------------------------------------
*
* Function: H5AC__check_if_write_permitted
*
@@ -2595,7 +2473,7 @@ H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
if((ext_conf_ptr == NULL) || (ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION) ||
(int_conf_ptr == NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or inf_conf_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or inf_conf_ptr on entry.")
int_conf_ptr->version = H5C__CURR_AUTO_SIZE_CTL_VER;
if(ext_conf_ptr->rpt_fcn_enabled)
@@ -2662,7 +2540,7 @@ H5AC_ignore_tags(const H5F_t *f)
/* Set up a new metadata tag */
if(H5C_ignore_tags(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed.")
done:
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c
index 8ca5102..8d99c6f 100644
--- a/src/H5ACdbg.c
+++ b/src/H5ACdbg.c
@@ -252,207 +252,6 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5AC_get_entry_ptr_from_addr()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, returns a pointer
- * to the entry in *entry_ptr_ptr. If the entry is not in the
- * cache, *entry_ptr_ptr is set to NULL.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * As heavy use of this function is almost certainly a
- * bad idea, the metadata cache tracks the number of
- * successful calls to this function, and (if
- * H5C_DO_SANITY_CHECKS is defined) displays any
- * non-zero count on cache shutdown.
- *
- * This function is just a wrapper that calls the H5C
- * version of the function.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, void **entry_ptr_ptr)
-{
- H5C_t *cache_ptr; /* Ptr to cache */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
-
- if(H5C_get_entry_ptr_from_addr(cache_ptr, addr, entry_ptr_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_ptr_from_addr() failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_get_entry_ptr_from_addr() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5AC_flush_dependency_exists()
- *
- * Purpose: Test to see if a flush dependency relationship exists
- * between the supplied parent and child. Both parties
- * are indicated by addresses so as to avoid the necessity
- * of protect / unprotect calls prior to this call.
- *
- * If either the parent or the child is not in the metadata
- * cache, the function sets *fd_exists_ptr to FALSE.
- *
- * If both are in the cache, the childs list of parents is
- * searched for the proposed parent. If the proposed parent
- * is found in the childs parent list, the function sets
- * *fd_exists_ptr to TRUE. In all other non-error cases,
- * the function sets *fd_exists_ptr FALSE.
- *
- * Return: SUCCEED on success/FAIL on failure. Note that
- * *fd_exists_ptr is undefined on failure.
- *
- * Programmer: John Mainzer
- * 9/28/16
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr,
- hbool_t *fd_exists_ptr)
-{
- H5C_t *cache_ptr; /* Ptr to cache */
- herr_t ret_value = FAIL; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
-
- ret_value = H5C_flush_dependency_exists(cache_ptr, parent_addr, child_addr, fd_exists_ptr);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_flush_dependency_exists() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5AC_verify_entry_type()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, test to see if its
- * type field contains the expected value.
- *
- * If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
- * on whether the entries type field matches the
- * expected_type parameter
- *
- * If the target entry is not in cache, *in_cache_ptr is
- * set to FALSE, and *type_ok_ptr is undefined.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * This function is just a wrapper that calls the H5C
- * version of the function.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- * Changes: None.
- *
- * JRM -- 9/17/16
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
- const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
- hbool_t *type_ok_ptr)
-{
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
-
- if(H5C_verify_entry_type(cache_ptr, addr, expected_type, in_cache_ptr, type_ok_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_verify_entry_type() failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_verify_entry_type() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5AC_get_serialization_in_progress
- *
- * Purpose: Return the current value of
- * cache_ptr->serialization_in_progress.
- *
- * Return: Current value of cache_ptr->serialization_in_progress.
- *
- * Programmer: John Mainzer
- * 8/24/15
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-hbool_t
-H5AC_get_serialization_in_progress(H5F_t *f)
-{
- H5C_t * cache_ptr;
- hbool_t ret_value = FALSE; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity check */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
-
- /* Set return value */
- ret_value = H5C_get_serialization_in_progress(cache_ptr);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_get_serialization_in_progress() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
* Function: H5AC_cache_is_clean()
*
* Purpose: Debugging function that verifies that all rings in the
@@ -475,7 +274,7 @@ hbool_t
H5AC_cache_is_clean(const H5F_t *f, H5AC_ring_t inner_ring)
{
H5C_t *cache_ptr;
- hbool_t ret_value = FALSE; /* Return value */
+ hbool_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOERR
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 945aaba..44ffd9d 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -81,7 +81,7 @@ typedef struct H5AC_addr_list_ud_t
{
H5AC_aux_t * aux_ptr; /* 'Auxiliary' parallel cache info */
haddr_t * addr_buf_ptr; /* Array to store addresses */
- unsigned u; /* Counter for position in array */
+ int i; /* Counter for position in array */
} H5AC_addr_list_ud_t;
@@ -90,21 +90,21 @@ typedef struct H5AC_addr_list_ud_t
/********************/
static herr_t H5AC__broadcast_candidate_list(H5AC_t *cache_ptr,
- unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__broadcast_clean_list(H5AC_t *cache_ptr);
static herr_t H5AC__construct_candidate_list(H5AC_t *cache_ptr,
H5AC_aux_t *aux_ptr, int sync_point_op);
static herr_t H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr,
- unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id);
static herr_t H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f,
hid_t dxpl_id);
-static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
+static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_candidate_list(const H5AC_t *cache_ptr,
- unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id);
-static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
+static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
haddr_t *candidates_list_ptr);
static herr_t H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id);
static herr_t H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
@@ -151,7 +151,7 @@ H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
*/
herr_t
H5AC__set_sync_point_done_callback(H5C_t * cache_ptr,
- void (* sync_point_done)(unsigned num_writes, haddr_t * written_entries_tbl))
+ void (* sync_point_done)(int num_writes, haddr_t * written_entries_tbl))
{
H5AC_aux_t * aux_ptr;
@@ -282,13 +282,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
+H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr = NULL;
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- unsigned num_entries;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -310,13 +310,13 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
* receivers can set up buffers to receive them. If there aren't
* any, we are done.
*/
- num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size = 0;
- unsigned chk_num_entries = 0;
+ int chk_num_entries = 0;
/* convert the candidate list into the format we
* are used to receiving from process 0, and also load it
@@ -328,7 +328,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
HDassert(haddr_buf_ptr != NULL);
/* Now broadcast the list of candidate entries */
- buf_size = sizeof(haddr_t) * num_entries;
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
@@ -378,8 +378,8 @@ H5AC__broadcast_clean_list_cb(void *_item, void H5_ATTR_UNUSED *_key,
/* Store the entry's address in the buffer */
addr = slist_entry_ptr->addr;
- udata->addr_buf_ptr[udata->u] = addr;
- udata->u++;
+ udata->addr_buf_ptr[udata->i] = addr;
+ udata->i++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -420,7 +420,7 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
haddr_t * addr_buf_ptr = NULL;
H5AC_aux_t * aux_ptr;
int mpi_result;
- unsigned num_entries = 0;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -437,8 +437,8 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
* receives can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- num_entries = (unsigned)H5SL_count(aux_ptr->c_slist_ptr);
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ num_entries = (int)H5SL_count(aux_ptr->c_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
@@ -446,14 +446,14 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
size_t buf_size;
/* allocate a buffer to store the list of entry base addresses in */
- buf_size = sizeof(haddr_t) * num_entries;
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
if(NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = addr_buf_ptr;
- udata.u = 0;
+ udata.i = 0;
/* Free all the clean list entries, building the address list in the callback */
/* (Callback also removes the matching entries from the dirtied list) */
@@ -568,8 +568,8 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
HDassert(udata);
/* Store the entry's address in the buffer */
- udata->addr_buf_ptr[udata->u] = slist_entry_ptr->addr;
- udata->u++;
+ udata->addr_buf_ptr[udata->i] = slist_entry_ptr->addr;
+ udata->i++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -610,14 +610,14 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entries_ptr,
+H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr = NULL;
H5AC_addr_list_ud_t udata;
haddr_t * haddr_buf_ptr = NULL;
size_t buf_size;
- unsigned num_entries = 0;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -635,19 +635,19 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
HDassert(haddr_buf_ptr_ptr != NULL);
HDassert(*haddr_buf_ptr_ptr == NULL);
- num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
/* allocate a buffer(s) to store the list of candidate entry
* base addresses in
*/
- buf_size = sizeof(haddr_t) * num_entries;
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = haddr_buf_ptr;
- udata.u = 0;
+ udata.i = 0;
/* Free all the candidate list entries, building the address list in the callback */
if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
@@ -1234,7 +1234,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id)
H5AC_aux_t * aux_ptr;
haddr_t * candidates_list_ptr = NULL;
int mpi_result;
- unsigned num_candidates = 0;
+ int num_candidates = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1448,12 +1448,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
+H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- unsigned num_entries;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1468,14 +1468,14 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
* can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, mpi_comm)))
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size;
/* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(haddr_t) * num_entries;
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
@@ -1523,7 +1523,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
- unsigned num_entries = 0;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1543,7 +1543,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
if(num_entries > 0)
/* mark the indicated entries as clean */
- if(H5C_mark_entries_as_clean(f, dxpl_id, num_entries, haddr_buf_ptr) < 0)
+ if(H5C_mark_entries_as_clean(f, dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
/* if it is defined, call the sync point done callback. Note
@@ -1582,7 +1582,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__receive_candidate_list(const H5AC_t *cache_ptr, unsigned *num_entries_ptr,
+H5AC__receive_candidate_list(const H5AC_t *cache_ptr, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr;
@@ -1667,7 +1667,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id)
H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- unsigned num_entries = 0;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2224,11 +2224,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
+H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
haddr_t *candidates_list_ptr)
{
H5AC_aux_t * aux_ptr;
- unsigned u;
+ int i;
FUNC_ENTER_STATIC_NOERR
@@ -2249,12 +2249,12 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
* cleaned list. However, for this metadata write strategy,
* we just want to remove all references to the candidate entries.
*/
- for(u = 0; u < num_candidates; u++) {
+ for(i = 0; i < num_candidates; i++) {
H5AC_slist_entry_t * d_slist_entry_ptr;
H5AC_slist_entry_t * c_slist_entry_ptr;
haddr_t addr;
- addr = candidates_list_ptr[u];
+ addr = candidates_list_ptr[i];
/* addr may be either on the dirtied list, or on the flushed
* and still clean list. Remove it.
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 77ba0ae..dbbd8a0 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -351,12 +351,6 @@ H5FL_EXTERN(H5AC_aux_t);
* this verification. The field is set to NULL when the
* callback is not needed.
*
- * The following field supports the metadata cache image feature.
- *
- * p0_image_len: unsiged integer containing the length of the metadata cache
- * image constructed by MPI process 0. This field should be 0
- * if the value is unknown, or if cache image is not enabled.
- *
****************************************************************************/
#ifdef H5_HAVE_PARALLEL
@@ -404,11 +398,8 @@ typedef struct H5AC_aux_t
void (* write_done)(void);
- void (* sync_point_done)(unsigned num_writes,
+ void (* sync_point_done)(int num_writes,
haddr_t * written_entries_tbl);
-
- unsigned p0_image_len;
-
} H5AC_aux_t; /* struct H5AC_aux_t */
#endif /* H5_HAVE_PARALLEL */
@@ -430,7 +421,7 @@ H5_DLL herr_t H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr,
H5_DLL herr_t H5AC__flush_entries(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op);
H5_DLL herr_t H5AC__set_sync_point_done_callback(H5C_t *cache_ptr,
- void (*sync_point_done)(unsigned num_writes, haddr_t *written_entries_tbl));
+ void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
H5_DLL herr_t H5AC__set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 1fe6456..3dd6079 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -88,7 +88,7 @@ typedef enum {
H5AC_DRVRINFO_ID, /* (26) driver info block (supplements superblock) */
H5AC_EPOCH_MARKER_ID, /* (27) epoch marker - always internal to cache */
H5AC_PROXY_ENTRY_ID, /* (28) cache entry proxy */
- H5AC_PREFETCHED_ENTRY_ID, /* (29) prefetched entry - always internal to cache */
+ H5AC_TEST_ID, /* (29) test entry -- not used for actual files */
H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
@@ -111,22 +111,14 @@ typedef enum {
* use the dump_stats parameter to takedown_cache(), or call
* H5C_stats() directly.
* JRM -- 4/12/15
- *
- * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much
- * the same way as H5AC_DUMP_STATS_ON_CLOSE. However, the set of stats
- * displayed is much smaller, and directed purely at the cache image feature.
- *
- * JRM -- 11/1/15
*/
#if H5C_COLLECT_CACHE_STATS
#define H5AC_DUMP_STATS_ON_CLOSE 0
-#define H5AC_DUMP_IMAGE_STATS_ON_CLOSE 0
#else /* H5C_COLLECT_CACHE_STATS */
#define H5AC_DUMP_STATS_ON_CLOSE 0
-#define H5AC_DUMP_IMAGE_STATS_ON_CLOSE 0
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -327,13 +319,7 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
}
#endif /* H5_HAVE_PARALLEL */
-#define H5AC__DEFAULT_CACHE_IMAGE_CONFIG \
-{ \
- /* int32_t version = */ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, \
- /* hbool_t generate_image = */ FALSE, \
- /* hbool_t save_resize_status = */ FALSE, \
- /* int32_t entry_ageout = */ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE \
-}
+
/*
* Library prototypes.
*/
@@ -358,6 +344,7 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
#define H5AC__FLUSH_COLLECTIVELY_FLAG H5C__FLUSH_COLLECTIVELY_FLAG
+#define H5AC__EVICT_ALLOW_LAST_PINS_FLAG H5C__EVICT_ALLOW_LAST_PINS_FLAG
/* #defines of flags used to report entry status in the
@@ -373,44 +360,11 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
#define H5AC_ES__IS_CORKED 0x0040
#define H5AC_ES__IMAGE_IS_UP_TO_DATE 0x0080
-/* Metadata entry class declarations */
-H5_DLLVAR const H5AC_class_t H5AC_BT[1];
-H5_DLLVAR const H5AC_class_t H5AC_SNODE[1];
-H5_DLLVAR const H5AC_class_t H5AC_LHEAP_PRFX[1];
-H5_DLLVAR const H5AC_class_t H5AC_LHEAP_DBLK[1];
-H5_DLLVAR const H5AC_class_t H5AC_GHEAP[1];
-H5_DLLVAR const H5AC_class_t H5AC_OHDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_OHDR_CHK[1];
-H5_DLLVAR const H5AC_class_t H5AC_BT2_HDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_BT2_INT[1];
-H5_DLLVAR const H5AC_class_t H5AC_BT2_LEAF[1];
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_HDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_DBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_IBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_FSPACE_HDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_FSPACE_SINFO[1];
-H5_DLLVAR const H5AC_class_t H5AC_SOHM_TABLE[1];
-H5_DLLVAR const H5AC_class_t H5AC_SOHM_LIST[1];
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_HDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_IBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_SBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1];
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_HDR[1];
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1];
-H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
-H5_DLLVAR const H5AC_class_t H5AC_EPOCH_MARKER[1];
-H5_DLLVAR const H5AC_class_t H5AC_PROXY_ENTRY[1];
-H5_DLLVAR const H5AC_class_t H5AC_PREFETCHED_ENTRY[1];
-
/* external function declarations: */
H5_DLL herr_t H5AC_init(void);
-H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr,
- H5AC_cache_image_config_t * image_config_ptr);
+H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_entry_status(const H5F_t *f, haddr_t addr,
unsigned *status_ptr);
H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
@@ -440,18 +394,13 @@ H5_DLL herr_t H5AC_remove_entry(void *entry);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
- size_t *min_clean_size_ptr, size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
+ size_t *min_clean_size_ptr, size_t *cur_size_ptr, int32_t *cur_num_entries_ptr);
H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
-/* Cache image routines */
-H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
- hsize_t len, hbool_t rw);
-H5_DLL herr_t H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr);
-
/* Tag & Ring routines */
H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
H5_DLL herr_t H5AC_flush_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id);
@@ -484,14 +433,6 @@ H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
H5_DLL herr_t H5AC_stats(const H5F_t *f);
H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
#ifndef NDEBUG
-H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void **entry_ptr_ptr);
-H5_DLL herr_t H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr,
- haddr_t child_addr, hbool_t *fd_exists_ptr);
-H5_DLL herr_t H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
- const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
- hbool_t *type_ok_ptr);
-H5_DLL hbool_t H5AC_get_serialization_in_progress(H5F_t *f);
H5_DLL hbool_t H5AC_cache_is_clean(const H5F_t *f, H5AC_ring_t inner_ring);
#endif /* NDEBUG */ /* end debugging functions */
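With the cache image plumbing stripped from H5ACprivate.h, H5AC_create() once again takes only the file pointer and the resize configuration. The sketch below shows the post-revert call shape; it assumes the pre-existing H5AC__DEFAULT_CACHE_CONFIG initializer from this header and borrows the surrounding error-macro style.

    /* Sketch of the restored create path (no image_config_ptr argument);
     * H5AC__DEFAULT_CACHE_CONFIG is assumed to be available as before.
     */
    H5AC_cache_config_t config = H5AC__DEFAULT_CACHE_CONFIG;

    if(H5AC_create(f, &config) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create metadata cache")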
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 5fdb3f4..dd16764 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -508,67 +508,6 @@ typedef struct H5AC_cache_config_t
} H5AC_cache_config_t;
-/****************************************************************************
- *
- * structure H5AC_cache_image_config_t
- *
- * H5AC_cache_image_ctl_t is a public structure intended for use in public
- * APIs. At least in its initial incarnation, it is a copy of struct
- * H5C_cache_image_ctl_t.
- *
- * The fields of the structure are discussed individually below:
- *
- * version: Integer field containing the version number of this version
- * of the H5C_image_ctl_t structure. Any instance of
- * H5C_image_ctl_t passed to the cache must have a known
- * version number, or an error will be flagged.
- *
- * generate_image: Boolean flag indicating whether a cache image should
- * be created on file close.
- *
- * save_resize_status: Boolean flag indicating whether the cache image
- * should include the adaptive cache resize configuration and status.
- * Note that this field is ignored at present.
- *
- * entry_ageout: Integer field indicating the maximum number of
- * times a prefetched entry can appear in subsequent cache images.
- * This field exists to allow the user to avoid the buildup of
- * infrequently used entries in long sequences of cache images.
- *
- * The value of this field must lie in the range
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100).
- *
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
- * is imposed on the number of times a prefetched entry can appear
- * in subsequent cache images.
- *
- * A value of 0 prevents prefetched entries from being included
- * in cache images.
- *
- * Positive integers restrict prefetched entries to the specified
- * number of appearances.
- *
- * Note that the number of subsequent cache images that a prefetched
- * entry has appeared in is tracked in an 8 bit field. Thus, while
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
- * current value, any value in excess of 255 will be the functional
- * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
- *
- ****************************************************************************/
-
-#define H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION 1
-
-#define H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE -1
-#define H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX 100
-
-typedef struct H5AC_cache_image_config_t {
- int32_t version;
- hbool_t generate_image;
- hbool_t save_resize_status;
- int32_t entry_ageout;
-} H5AC_cache_image_config_t;
-
#ifdef __cplusplus
}
#endif
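The H5ACpublic.h hunk above removes the public cache image configuration type together with its constants. For reference, a short sketch of how that structure would have been filled in, using only the fields and values documented in the removed block; once this revert is applied, the type and constants no longer exist.

    /* Based solely on the removed documentation above; not compilable
     * after this revert, since the type and constants are gone.
     */
    H5AC_cache_image_config_t image_config;

    image_config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION; /* 1 */
    image_config.generate_image     = TRUE;   /* write a cache image on file close */
    image_config.save_resize_status = FALSE;  /* currently ignored, per the comment */
    image_config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE; /* -1, no limit */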
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index 71fdfde..7b1ec4d 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -307,6 +307,15 @@ typedef struct H5B2_node_info_test_t {
/* Package Private Variables */
/*****************************/
+/* H5B2 header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_BT2_HDR[1];
+
+/* H5B2 internal node inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_BT2_INT[1];
+
+/* H5B2 leaf node inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_BT2_LEAF[1];
+
/* Declare a free list to manage the H5B2_internal_t struct */
H5FL_EXTERN(H5B2_internal_t);
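These declarations move the v2 B-tree cache client classes back into the package header (H5Bpkg.h below does the same for the v1 B-tree), rather than listing every class centrally in H5ACprivate.h. A rough sketch of how B-tree code refers to one of these classes when protecting an entry follows; the H5AC_protect() argument list and the udata setup are assumptions drawn from the general style of this branch, not taken from this diff.

    /* Illustrative only: protect a v2 B-tree header via its cache client
     * class.  The udata variable is assumed to be prepared by the caller,
     * and the protect call shape is an assumption.
     */
    H5B2_hdr_t *hdr = NULL;

    if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR,
            hdr_addr, &udata, H5AC__READ_ONLY_FLAG)))
        HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect v2 B-tree header")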
diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h
index fb93b8a..41e0951 100644
--- a/src/H5Bpkg.h
+++ b/src/H5Bpkg.h
@@ -73,6 +73,9 @@ typedef struct H5B_cache_ud_t {
/* Package Private Variables */
/*****************************/
+/* H5B header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_BT[1];
+
/* Declare a free list to manage the haddr_t sequence information */
H5FL_SEQ_EXTERN(haddr_t);
diff --git a/src/H5C.c b/src/H5C.c
index 805b4f5..4adee6d 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -46,9 +46,9 @@
* - Change protect/unprotect to lock/unlock.
*
* - Flush entries in increasing address order in
- * H5C__make_space_in_cache().
+ * H5C_make_space_in_cache().
*
- * - Also in H5C__make_space_in_cache(), use high and low water marks
+ * - Also in H5C_make_space_in_cache(), use high and low water marks
* to reduce the number of I/O calls.
*
* - When flushing, attempt to combine contiguous entries to reduce
@@ -75,7 +75,7 @@
/****************/
#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-#define H5F_FRIEND /* suppress error about including H5Fpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/
@@ -155,17 +155,21 @@ static void * H5C_load_entry(H5F_t * f,
haddr_t addr,
void * udata);
+static herr_t H5C_make_space_in_cache(H5F_t * f,
+ hid_t dxpl_id,
+ size_t space_needed,
+ hbool_t write_permitted);
+
static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
-static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring);
-static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
- H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
-
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
+static herr_t H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
+ hid_t dxpl_id);
+
#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
H5C_cache_entry_t *target_ptr);
@@ -239,7 +243,7 @@ H5C_t *
H5C_create(size_t max_cache_size,
size_t min_clean_size,
int max_type_id,
- const H5C_class_t * const * class_table_ptr,
+ const char * (* type_name_table_ptr),
H5C_write_permitted_func_t check_write_permitted,
hbool_t write_permitted,
H5C_log_flush_func_t log_flush,
@@ -257,21 +261,21 @@ H5C_create(size_t max_cache_size,
HDassert( max_type_id >= 0 );
HDassert( max_type_id < H5C__MAX_NUM_TYPE_IDS );
- HDassert( class_table_ptr );
+ HDassert( type_name_table_ptr );
for ( i = 0; i <= max_type_id; i++ ) {
- HDassert( (class_table_ptr)[i] );
- HDassert(HDstrlen((class_table_ptr)[i]->name) > 0);
+ HDassert( (type_name_table_ptr)[i] );
+ HDassert( HDstrlen(( type_name_table_ptr)[i]) > 0 );
} /* end for */
if(NULL == (cache_ptr = H5FL_CALLOC(H5C_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
if(NULL == (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list.")
if(NULL == (cache_ptr->tag_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses.")
/* If we get this far, we should succeed. Go ahead and initialize all
* the fields.
@@ -293,7 +297,7 @@ H5C_create(size_t max_cache_size,
cache_ptr->max_type_id = max_type_id;
- cache_ptr->class_table_ptr = class_table_ptr;
+ cache_ptr->type_name_table_ptr = type_name_table_ptr;
cache_ptr->max_cache_size = max_cache_size;
cache_ptr->min_clean_size = min_clean_size;
@@ -385,7 +389,6 @@ H5C_create(size_t max_cache_size,
cache_ptr->resize_enabled = FALSE;
cache_ptr->cache_full = FALSE;
cache_ptr->size_decreased = FALSE;
- cache_ptr->resize_in_progress = FALSE;
(cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER;
(cache_ptr->resize_ctl).rpt_fcn = NULL;
@@ -431,52 +434,28 @@ H5C_create(size_t max_cache_size,
((cache_ptr->epoch_markers)[i]).magic =
H5C__H5C_CACHE_ENTRY_T_MAGIC;
((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i;
- ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER;
+ ((cache_ptr->epoch_markers)[i]).type = &H5C__epoch_marker_class;
}
- /* Initialize cache image generation on file close related fields.
- * Initial value of image_ctl must match H5C__DEFAULT_CACHE_IMAGE_CTL
- * in H5Cprivate.h.
- */
- cache_ptr->image_ctl.version = H5C__CURR_CACHE_IMAGE_CTL_VER;
- cache_ptr->image_ctl.generate_image = FALSE;
- cache_ptr->image_ctl.save_resize_status = FALSE;
- cache_ptr->image_ctl.entry_ageout = -1;
- cache_ptr->image_ctl.flags = H5C_CI__ALL_FLAGS;
-
- cache_ptr->serialization_in_progress= FALSE;
- cache_ptr->load_image = FALSE;
- cache_ptr->image_loaded = FALSE;
- cache_ptr->delete_image = FALSE;
- cache_ptr->image_addr = HADDR_UNDEF;
- cache_ptr->image_len = 0;
- cache_ptr->image_data_len = 0;
-
cache_ptr->entries_loaded_counter = 0;
cache_ptr->entries_inserted_counter = 0;
cache_ptr->entries_relocated_counter = 0;
- cache_ptr->entry_fd_height_change_counter = 0;
-
- cache_ptr->num_entries_in_image = 0;
- cache_ptr->image_entries = NULL;
- cache_ptr->image_buffer = NULL;
/* initialize free space manager related fields: */
cache_ptr->rdfsm_settled = FALSE;
cache_ptr->mdfsm_settled = FALSE;
- if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "H5C_reset_cache_hit_rate_stats failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
+ "H5C_reset_cache_hit_rate_stats failed.")
+ }
H5C_stats__reset(cache_ptr);
cache_ptr->prefix[0] = '\0'; /* empty string */
-#ifndef NDEBUG
- cache_ptr->get_entry_ptr_from_addr_counter = 0;
-#endif /* NDEBUG */
-
/* Set return value */
ret_value = cache_ptr;
@@ -754,11 +733,6 @@ H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
/* Make certain there aren't any protected entries */
HDassert(cache_ptr->pl_len == 0);
- /* Prepare cache image */
- if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
-
-
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_prep_for_file_close() */
@@ -801,20 +775,10 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr->close_warning_received);
-#if H5AC_DUMP_IMAGE_STATS_ON_CLOSE
- if(H5C_image_stats(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
-#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
-
/* Flush and invalidate all cache entries */
if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__NO_FLAGS_SET) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
- /* Generate & write cache image if requested */
- if(cache_ptr->image_ctl.generate_image)
- if(H5C__generate_cache_image(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")
-
if(cache_ptr->slist_ptr != NULL) {
H5SL_close(cache_ptr->slist_ptr);
cache_ptr->slist_ptr = NULL;
@@ -826,12 +790,6 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
} /* end if */
#ifndef NDEBUG
-#if H5C_DO_SANITY_CHECKS
- if(cache_ptr->get_entry_ptr_from_addr_counter > 0)
- HDfprintf(stdout, "*** %ld calls to H5C_get_entry_ptr_from_add(). ***\n",
- cache_ptr->get_entry_ptr_from_addr_counter);
-#endif /* H5C_DO_SANITY_CHECKS */
-
cache_ptr->magic = 0;
#endif /* NDEBUG */
@@ -857,12 +815,14 @@ done:
herr_t
H5C_evict(H5F_t * f, hid_t dxpl_id)
{
+ H5C_t *cache_ptr = f->shared->cache;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* Sanity check */
- HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
/* Flush and invalidate all cache entries except the pinned entries */
if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
@@ -908,7 +868,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
#if H5C_DO_EXTREME_SANITY_CHECKS
if(H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* Look for entry in cache */
@@ -922,9 +882,9 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
/* Check for entry being pinned or protected */
if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected.")
if(entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned.")
#ifdef H5_HAVE_PARALLEL
if(entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
@@ -949,7 +909,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if(H5C_validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1008,12 +968,12 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
#if H5C_DO_SANITY_CHECKS
int i;
- uint32_t index_len = 0;
+ int32_t index_len = 0;
size_t index_size = (size_t)0;
size_t clean_index_size = (size_t)0;
size_t dirty_index_size = (size_t)0;
size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
H5C_ring_t ring;
H5C_t * cache_ptr;
@@ -1060,7 +1020,7 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
@@ -1072,7 +1032,7 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(destroy) {
if(H5C_flush_invalidate_cache(f, dxpl_id, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed.")
} /* end if */
else {
/* flush each ring, starting from the outermost ring and
@@ -1080,9 +1040,9 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
*/
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
-
- /* Only call the free space manager settle routines when close
- * warning has been received.
+ /* only call the free space manager settle routines when close
+ * warning has been received, and then only when the index is
+ * non-empty for that ring.
*/
if(cache_ptr->close_warning_received) {
switch(ring) {
@@ -1090,20 +1050,36 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
break;
case H5C_RING_RDFSM:
- /* Settle raw data FSM */
- if(!cache_ptr->rdfsm_settled)
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ if(!cache_ptr->rdfsm_settled) {
+ hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
+
+ /* Settle raw data FSM */
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+
+ /* Only set the flag if the FSM was actually settled */
+ if(fsm_settled)
+ cache_ptr->rdfsm_settled = TRUE;
+ } /* end if */
break;
case H5C_RING_MDFSM:
- /* Settle metadata FSM */
- if(!cache_ptr->mdfsm_settled)
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ if(!cache_ptr->mdfsm_settled) {
+ hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
+
+ /* Settle metadata FSM */
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+
+ /* Only set the flag if the FSM was actually settled */
+ if(fsm_settled)
+ cache_ptr->mdfsm_settled = TRUE;
+ } /* end if */
break;
case H5C_RING_SBE:
+ break;
+
case H5C_RING_SB:
break;
@@ -1114,7 +1090,7 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
} /* end if */
if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed.")
ring++;
} /* end while */
} /* end else */
@@ -1155,6 +1131,7 @@ H5C_flush_to_min_clean(H5F_t * f,
hid_t dxpl_id)
{
H5C_t * cache_ptr;
+ herr_t result;
hbool_t write_permitted;
#if 0 /* modified code -- commented out for now */ /* JRM */
int i;
@@ -1176,19 +1153,36 @@ H5C_flush_to_min_clean(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if(cache_ptr->check_write_permitted != NULL) {
- if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get write_permitted")
- } /* end if */
- else
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Can't get write_permitted")
+ }
+ } else {
+
write_permitted = cache_ptr->write_permitted;
+ }
- if(!write_permitted)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache write is not permitted!?!")
+ if ( ! write_permitted ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "cache write is not permitted!?!\n");
+ }
#if 1 /* original code */
- if(H5C__make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__make_space_in_cache failed")
+ result = H5C_make_space_in_cache(f,
+ dxpl_id,
+ (size_t)0,
+ write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C_make_space_in_cache failed.")
+ }
#else /* modified code -- commented out for now */
if ( cache_ptr->max_cache_size > cache_ptr->index_size ) {
@@ -1226,8 +1220,12 @@ H5C_flush_to_min_clean(H5F_t * f,
*/
flushed_entries_list = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
(size_t)(cache_ptr->slist_len));
- if(flushed_entries_list == NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flushed entries list")
+
+ if ( flushed_entries_list == NULL ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "memory allocation failed for flushed entries list")
+ }
/* Scan the dirty LRU list from tail forward and mark sufficient
* entries to free up the necessary space. Keep a list of the
@@ -1257,8 +1255,13 @@ H5C_flush_to_min_clean(H5F_t * f,
/* Flush the marked entries */
- if(H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_cache failed")
+ result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
+ H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_cache failed.")
+ }
/* Now touch up the LRU list so as to place the flushed entries in
* the order they would be in if we had flushed them in the
@@ -1333,9 +1336,8 @@ H5C_insert_entry(H5F_t * f,
hbool_t set_flush_marker;
hbool_t write_permitted = TRUE;
size_t empty_space;
- H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr;
H5C_cache_entry_t *test_entry_ptr;
- hbool_t entry_tagged = FALSE;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1348,7 +1350,6 @@ H5C_insert_entry(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
- HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
HDassert( type->image_len );
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
@@ -1356,10 +1357,14 @@ H5C_insert_entry(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
/* no need to verify that entry is not already in the index as */
/* we already make that check below. */
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 );
@@ -1384,9 +1389,9 @@ H5C_insert_entry(H5F_t * f,
if(test_entry_ptr != NULL) {
if(test_entry_ptr == entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache.")
else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache.")
} /* end if */
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
@@ -1453,33 +1458,16 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
- /* initialize cache image related fields */
- entry_ptr->include_in_image = FALSE;
- entry_ptr->lru_rank = 0;
- entry_ptr->image_dirty = FALSE;
- entry_ptr->fd_parent_count = 0;
- entry_ptr->fd_parent_addrs = NULL;
- entry_ptr->fd_child_count = 0;
- entry_ptr->fd_dirty_child_count = 0;
- entry_ptr->image_fd_height = 0;
- entry_ptr->prefetched = FALSE;
- entry_ptr->prefetch_type_id = 0;
- entry_ptr->age = 0;
-#ifndef NDEBUG /* debugging field */
- entry_ptr->serialization_count = 0;
-#endif /* NDEBUG */
-
/* Apply tag to newly inserted entry */
if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
- entry_tagged = TRUE;
H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
if(cache_ptr->flash_size_increase_possible &&
(entry_ptr->size > cache_ptr->flash_size_increase_threshold))
if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed.")
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
@@ -1509,7 +1497,7 @@ H5C_insert_entry(H5F_t * f,
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C__make_space_in_cache()
+ * size limit. The subsequent call to H5C_make_space_in_cache()
* may evict the entries required to free more or less space
* depending on conditions. It MAY be less if the cache is
* currently undersized, or more if the cache is oversized.
@@ -1532,9 +1520,9 @@ H5C_insert_entry(H5F_t * f,
* no point in worrying about the third.
*/
- if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
- } /* end if */
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_make_space_in_cache failed.")
+ }
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -1545,10 +1533,10 @@ H5C_insert_entry(H5F_t * f,
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) )
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* If the entry's type has a 'notify' callback send a 'after insertion'
@@ -1600,16 +1588,12 @@ H5C_insert_entry(H5F_t * f,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) )
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- if(ret_value < 0 && entry_tagged)
- if(H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
-
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_insert_entry() */
@@ -1931,7 +1915,7 @@ H5C_move_entry(H5C_t * cache_ptr,
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
@@ -2035,7 +2019,7 @@ done:
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2076,14 +2060,17 @@ H5C_resize_entry(void *thing, size_t new_size)
/* Check for usage errors */
if(new_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive.")
if(!(entry_ptr->is_pinned || entry_ptr->is_protected))
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* update for change in entry size if necessary */
@@ -2180,10 +2167,14 @@ H5C_resize_entry(void *thing, size_t new_size)
} /* end if */
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2279,10 +2270,13 @@ H5C_pin_protected_entry(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2295,11 +2289,15 @@ H5C_pin_protected_entry(void *thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2326,6 +2324,32 @@ done:
*
* Programmer: John Mainzer - 6/2/04
*
+ * JRM -- 11/13/08
+ * Modified function to call H5C_make_space_in_cache() when
+ * the min_clean_size is violated, not just when there isn't
+ * enough space for an entry that has just been loaded.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
+ * JRM -- 9/1/14
+ * Replace the old rw parameter with the flags parameter.
+ * This allows H5C_protect to accept flags other than
+ * H5C__READ_ONLY_FLAG.
+ *
+ * Added support for the H5C__FLUSH_LAST_FLAG.
+ * At present, this flag is only applied if the entry is
+ * not in cache, and is loaded into the cache as a result of
+ * this call.
+ *
*-------------------------------------------------------------------------
*/
void *
@@ -2364,22 +2388,16 @@ H5C_protect(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
- HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
HDassert( H5F_addr_defined(addr) );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
- /* Load the cache image, if requested */
- if(cache_ptr->load_image) {
- cache_ptr->load_image = FALSE;
- if(H5C__load_cache_image(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry.\n")
+ }
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
read_only = ( (flags & H5C__READ_ONLY_FLAG) != 0 );
flush_last = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
@@ -2410,24 +2428,9 @@ H5C_protect(H5F_t * f,
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
- if(entry_ptr != NULL) {
+ if ( entry_ptr != NULL ) {
if(entry_ptr->ring != ring)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry")
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- if(entry_ptr->prefetched) {
- /* This call removes the prefetched entry from the cache,
- * and replaces it with an entry deserialized from the
- * image of the prefetched entry.
- */
- if(H5C__deserialize_prefetched_entry(f, dxpl_id, cache_ptr, &entry_ptr, type, addr, udata) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(entry_ptr->addr == addr);
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry\n")
/* Check for trying to load the wrong type of entry from an address */
if(entry_ptr->type != type)
@@ -2520,8 +2523,6 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
entry_ptr = (H5C_cache_entry_t *)thing;
- cache_ptr->entries_loaded_counter++;
-
entry_ptr->ring = ring;
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
@@ -2539,7 +2540,7 @@ H5C_protect(H5F_t * f,
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -2548,7 +2549,7 @@ H5C_protect(H5F_t * f,
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
/* try to free up if necessary and if evictions are permitted. Note
- * that if evictions are enabled, we will call H5C__make_space_in_cache()
+ * that if evictions are enabled, we will call H5C_make_space_in_cache()
* regardless if the min_free_space requirement is not met.
*/
if ( ( cache_ptr->evictions_enabled ) &&
@@ -2583,7 +2584,7 @@ H5C_protect(H5F_t * f,
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C__make_space_in_cache()
+ * size limit. The subsequent call to H5C_make_space_in_cache()
* may evict the entries required to free more or less space
* depending on conditions. It MAY be less if the cache is
* currently undersized, or more if the cache is oversized.
@@ -2610,9 +2611,9 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
- } /* end if */
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 1.")
+ }
/* Insert the entry in the hash table. It can't be dirty yet, so
* we don't even check to see if it should go in the skip list.
@@ -2647,34 +2648,44 @@ H5C_protect(H5F_t * f,
*/
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
+ /* Update entries loaded in cache counter */
+ cache_ptr->entries_loaded_counter++;
+
/* Record that the entry was loaded, to trigger a notify callback later */
/* (After the entry is fully added to the cache) */
was_loaded = TRUE;
- } /* end else */
+ }
- HDassert(entry_ptr->addr == addr);
- HDassert(entry_ptr->type == type);
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->type == type );
+
+ if ( entry_ptr->is_protected ) {
+
+ if ( ( read_only ) && ( entry_ptr->is_read_only ) ) {
+
+ HDassert( entry_ptr->ro_ref_count > 0 );
- if(entry_ptr->is_protected) {
- if(read_only && entry_ptr->is_read_only) {
- HDassert(entry_ptr->ro_ref_count > 0);
(entry_ptr->ro_ref_count)++;
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
- } /* end if */
- else {
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Target already protected & not read only?!?.")
+ }
+ } else {
+
H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
entry_ptr->is_protected = TRUE;
if ( read_only ) {
+
entry_ptr->is_read_only = TRUE;
entry_ptr->ro_ref_count = 1;
- } /* end if */
+ }
entry_ptr->dirtied = FALSE;
- } /* end else */
+ }
H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
@@ -2692,7 +2703,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 2")
else
have_write_permitted = TRUE;
} else {
@@ -2704,14 +2715,16 @@ H5C_protect(H5F_t * f,
}
}
- if(cache_ptr->resize_enabled &&
- (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)) {
+ if ( ( cache_ptr->resize_enabled ) &&
+ ( cache_ptr->cache_accesses >=
+ (cache_ptr->resize_ctl).epoch_length ) ) {
if(H5C__auto_adjust_cache_size(f, dxpl_id, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed.")
+ }
+
+ if ( cache_ptr->size_decreased ) {
- if(cache_ptr->size_decreased) {
cache_ptr->size_decreased = FALSE;
/* check to see if the cache is now oversized due to the cache
@@ -2719,7 +2732,7 @@ H5C_protect(H5F_t * f,
* bring the cache size down to the current maximum cache size.
*
* Also, if the min_clean_size requirement is not met, we
- * should also call H5C__make_space_in_cache() to bring us
+ * should also call H5C_make_space_in_cache() to bring us
* into compliance.
*/
@@ -2736,10 +2749,10 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- if(H5C__make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+ if(H5C_make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 2.")
}
- } /* end if */
+ }
}
/* If we loaded the entry and the entry's type has a 'notify' callback, send
@@ -2776,10 +2789,10 @@ H5C_protect(H5F_t * f,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) )
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2806,7 +2819,7 @@ H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry.")
cache_ptr->cache_hits = 0;
cache_ptr->cache_accesses = 0;
@@ -2846,27 +2859,27 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry.")
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version.")
/* check general configuration section of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config.")
/* check size increase control fields of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config.")
/* check size decrease control fields of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config.")
/* check for conflicts between size increase and size decrease controls: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config.")
/* will set the increase possible fields to FALSE later if needed */
cache_ptr->size_increase_possible = TRUE;
@@ -2886,7 +2899,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?.")
} /* end switch */
/* logically, this is where configuration for flash cache size increases
@@ -2920,7 +2933,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?.")
} /* end switch */
if(config_ptr->max_size == config_ptr->min_size) {
@@ -2976,18 +2989,18 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed.")
/* remove excess epoch markers if any */
if((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) ||
(config_ptr->decr_mode == H5C_decr__age_out)) {
if(cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers.")
} /* end if */
else if(cache_ptr->epoch_markers_active > 0) {
if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
}
/* configure flash size increase facility. We wait until the
@@ -3011,7 +3024,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?.")
break;
} /* end switch */
} /* end if */
@@ -3042,7 +3055,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
/* There is no fundamental reason why we should not permit
* evictions to be disabled while automatic resize is enabled.
@@ -3053,7 +3066,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
if((evictions_enabled != TRUE) &&
((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
(cache_ptr->resize_ctl.decr_mode != H5C_decr__off)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled.")
cache_ptr->evictions_enabled = evictions_enabled;
@@ -3148,10 +3161,13 @@ H5C_unpin_entry(void *_entry_ptr)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3160,14 +3176,19 @@ H5C_unpin_entry(void *_entry_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_unpin_entry() */
@@ -3254,10 +3275,13 @@ H5C_unprotect(H5F_t * f,
was_clean = ! ( entry_ptr->is_dirty );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* if the entry has multiple read only protects, just decrement
@@ -3414,9 +3438,9 @@ H5C_unprotect(H5F_t * f,
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?.")
else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
/* Set the 'free file space' flag for the flush, if needed */
if(free_file_space)
@@ -3439,9 +3463,9 @@ H5C_unprotect(H5F_t * f,
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?.")
else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
@@ -3452,14 +3476,19 @@ H5C_unprotect(H5F_t * f,
H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0)) {
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_unprotect() */
@@ -3467,20 +3496,23 @@ done:
*
* Function: H5C_unsettle_entry_ring
*
- * Purpose: Advise the metadata cache that the specified entry's free space
- * manager ring is no longer settled (if it was on entry).
+ * Purpose: Advise the metadata cache that the specified entry's metadata
+ * cache manager ring is no longer settled (if it was on entry).
*
- * If the target free space manager ring is already
+ * If the target metadata cache manager ring is already
* unsettled, do nothing, and return SUCCEED.
*
- * If the target free space manager ring is settled, and
+ * If the target metadata cache manager ring is settled, and
* we are not in the process of a file shutdown, mark
* the ring as unsettled, and return SUCCEED.
*
- * If the target free space manager is settled, and we
+ * If the target metadata cache manager is settled, and we
* are in the process of a file shutdown, post an error
* message, and return FAIL.
*
+ * Note that this function simply passes the call on to
+ * the metadata cache proper, and returns the result.
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
@@ -3560,13 +3592,18 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
FUNC_ENTER_NOAPI(FAIL)
- if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
+ if ( config_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
+ }
+
+ if ( config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version.")
+ }
- if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
- if((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
+ if ( (tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0 ) {
if(config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
@@ -3577,29 +3614,43 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
if(config_ptr->min_size > config_ptr->max_size)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
- if(config_ptr->set_initial_size &&
- ((config_ptr->initial_size < config_ptr->min_size) ||
- (config_ptr->initial_size > config_ptr->max_size)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "initial_size must be in the interval [min_size, max_size]")
+ if ( ( config_ptr->set_initial_size ) &&
+ ( ( config_ptr->initial_size < config_ptr->min_size ) ||
+ ( config_ptr->initial_size > config_ptr->max_size ) ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "initial_size must be in the interval [min_size, max_size]");
+ }
+
+ if ( ( config_ptr->min_clean_fraction < (double)0.0f ) ||
+ ( config_ptr->min_clean_fraction > (double)1.0f ) ) {
- if((config_ptr->min_clean_fraction < (double)0.0f) ||
- (config_ptr->min_clean_fraction > (double)1.0f))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "min_clean_fraction must be in the interval [0.0, 1.0]");
+ }
+
+ if ( config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH ) {
- if(config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
+ }
+
+ if ( config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH ) {
- if(config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
+ }
} /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
- if((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
- if((config_ptr->incr_mode != H5C_incr__off) &&
- (config_ptr->incr_mode != H5C_incr__threshold))
+ if ( (tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0 ) {
+
+ if ( ( config_ptr->incr_mode != H5C_incr__off ) &&
+ ( config_ptr->incr_mode != H5C_incr__threshold ) ) {
+
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
+ }
+
+ if ( config_ptr->incr_mode == H5C_incr__threshold ) {
- if(config_ptr->incr_mode == H5C_incr__threshold) {
if((config_ptr->lower_hr_threshold < (double)0.0f) ||
(config_ptr->lower_hr_threshold > (double)1.0f))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "lower_hr_threshold must be in the range [0.0, 1.0]")
@@ -3612,24 +3663,33 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
*/
} /* H5C_incr__threshold */
- switch(config_ptr->flash_incr_mode) {
+ switch ( config_ptr->flash_incr_mode )
+ {
case H5C_flash_incr__off:
/* nothing to do here */
break;
case H5C_flash_incr__add_space:
- if((config_ptr->flash_multiple < (double)0.1f) ||
- (config_ptr->flash_multiple > (double)10.0f))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_multiple must be in the range [0.1, 10.0]")
- if((config_ptr->flash_threshold < (double)0.1f) ||
- (config_ptr->flash_threshold > (double)1.0f))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_threshold must be in the range [0.1, 1.0]")
+ if ( ( config_ptr->flash_multiple < (double)0.1f ) ||
+ ( config_ptr->flash_multiple > (double)10.0f ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "flash_multiple must be in the range [0.1, 10.0]");
+ }
+
+ if ( ( config_ptr->flash_threshold < (double)0.1f ) ||
+ ( config_ptr->flash_threshold > (double)1.0f ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "flash_threshold must be in the range [0.1, 1.0]");
+ }
break;
default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "Invalid flash_incr_mode");
break;
- } /* end switch */
+ }
} /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
@@ -3645,23 +3705,35 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
}
if ( config_ptr->decr_mode == H5C_decr__threshold ) {
- if(config_ptr->upper_hr_threshold > (double)1.0f)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
- if((config_ptr->decrement > (double)1.0f) ||
- (config_ptr->decrement < (double)0.0f))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
+ if ( config_ptr->upper_hr_threshold > (double)1.0f ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "upper_hr_threshold must be <= 1.0");
+ }
+
+ if ( ( config_ptr->decrement > (double)1.0f ) ||
+ ( config_ptr->decrement < (double)0.0f ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "decrement must be in the interval [0.0, 1.0]");
+ }
/* no need to check max_decrement as it is a size_t
* and thus must be non-negative.
*/
} /* H5C_decr__threshold */
- if((config_ptr->decr_mode == H5C_decr__age_out) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
+ if ( ( config_ptr->decr_mode == H5C_decr__age_out ) ||
+ ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold )
+ ) {
+
+ if ( config_ptr->epochs_before_eviction < 1 ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "epochs_before_eviction must be positive");
+ }
- if(config_ptr->epochs_before_eviction < 1)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
if(config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
@@ -3675,24 +3747,43 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
*/
} /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
- if(config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
- if((config_ptr->upper_hr_threshold > (double)1.0f) ||
- (config_ptr->upper_hr_threshold < (double)0.0f))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be in the interval [0.0, 1.0]")
+ if ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) {
+
+ if ( ( config_ptr->upper_hr_threshold > (double)1.0f ) ||
+ ( config_ptr->upper_hr_threshold < (double)0.0f ) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "upper_hr_threshold must be in the interval [0.0, 1.0]");
+ }
} /* H5C_decr__age_out_with_threshold */
+
} /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
if ( (tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0 ) {
- if((config_ptr->incr_mode == H5C_incr__threshold)
- && ((config_ptr->decr_mode == H5C_decr__threshold) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold))
- && (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
+
+ if ( ( config_ptr->incr_mode == H5C_incr__threshold )
+ &&
+ ( ( config_ptr->decr_mode == H5C_decr__threshold )
+ ||
+ ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold )
+ )
+ &&
+ ( config_ptr->lower_hr_threshold
+ >=
+ config_ptr->upper_hr_threshold
+ )
+ ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
+ "conflicting threshold fields in config.")
+ }
} /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_validate_resize_config() */
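Aside: the intervals enforced by H5C_validate_resize_config() reach applications through the public metadata cache configuration API. The sketch below is illustrative only (the sizes and thresholds are arbitrary, not recommendations); it assumes nothing beyond the public H5Pget_mdc_config()/H5Pset_mdc_config() calls and the H5AC_cache_config_t fields that mirror the checks above.

#include "hdf5.h"

/* Minimal sketch: adjust the metadata cache auto-resize controls on a
 * file access property list.  Values are illustrative; each assignment
 * stays inside the interval checked by H5C_validate_resize_config().
 */
static herr_t
tune_mdc_resize(hid_t fapl_id)
{
    H5AC_cache_config_t config;

    /* the version field must be set before querying the current config */
    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if(H5Pget_mdc_config(fapl_id, &config) < 0)
        return -1;

    config.set_initial_size   = 1;                  /* hbool_t TRUE */
    config.initial_size       = 4 * 1024 * 1024;    /* in [min_size, max_size] */
    config.min_size           = 1 * 1024 * 1024;
    config.max_size           = 32 * 1024 * 1024;   /* <= H5C__MAX_MAX_CACHE_SIZE */
    config.min_clean_fraction = 0.3;                /* in [0.0, 1.0] */
    config.epoch_length       = 50000;              /* within the AR epoch bounds */

    return H5Pset_mdc_config(fapl_id, &config);
}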
@@ -3788,7 +3879,6 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
child_entry->flush_dep_parent_nalloc *= 2;
} /* end else */
- cache_ptr->entry_fd_height_change_counter++;
} /* end if */
/* Add the dependency to the child's parent array */
@@ -3996,7 +4086,7 @@ H5C__auto_adjust_cache_size(H5F_t * f,
hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
+ herr_t result;
hbool_t inserted_epoch_marker = FALSE;
size_t new_max_cache_size = 0;
size_t old_max_cache_size = 0;
@@ -4016,33 +4106,29 @@ H5C__auto_adjust_cache_size(H5F_t * f,
HDassert( (double)0.0f <= (cache_ptr->resize_ctl).min_clean_fraction );
HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f );
- /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if(cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
+ if ( !cache_ptr->resize_enabled ) {
- cache_ptr->resize_in_progress = TRUE;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled.")
+ }
- if(!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
+ HDassert( ( (cache_ptr->resize_ctl).incr_mode != H5C_incr__off ) || \
+ ( (cache_ptr->resize_ctl).decr_mode != H5C_decr__off ) );
- HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) || \
- ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
+ if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) {
- if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.")
+ }
HDassert( ( (double)0.0f <= hit_rate ) && ( hit_rate <= (double)1.0f ) );
- switch((cache_ptr->resize_ctl).incr_mode) {
+ switch ( (cache_ptr->resize_ctl).incr_mode )
+ {
case H5C_incr__off:
- if(cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
+ if ( cache_ptr->size_increase_possible ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "size_increase_possible but H5C_incr__off?!?!?")
+ }
break;
case H5C_incr__threshold:
@@ -4092,7 +4178,7 @@ H5C__auto_adjust_cache_size(H5F_t * f,
break;
default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.")
}
/* If the decr_mode is either age out or age out with threshold, we
@@ -4119,10 +4205,17 @@ H5C__auto_adjust_cache_size(H5F_t * f,
)
) {
- if(H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
+ result = H5C__autoadjust__ageout__insert_new_marker(cache_ptr);
- inserted_epoch_marker = TRUE;
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "can't insert new epoch marker.")
+
+ } else {
+
+ inserted_epoch_marker = TRUE;
+ }
}
/* don't run the cache size decrease code unless the cache size
@@ -4182,18 +4275,32 @@ H5C__auto_adjust_cache_size(H5F_t * f,
case H5C_decr__age_out_with_threshold:
case H5C_decr__age_out:
- if(!inserted_epoch_marker) {
- if(!cache_ptr->size_decrease_possible)
+ if ( ! inserted_epoch_marker ) {
+
+ if ( ! cache_ptr->size_decrease_possible ) {
+
status = decrease_disabled;
- else {
- if(H5C__autoadjust__ageout(f, dxpl_id, hit_rate, &status, &new_max_cache_size, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
- } /* end else */
- } /* end if */
+
+ } else {
+
+ result = H5C__autoadjust__ageout(f,
+ dxpl_id,
+ hit_rate,
+ &status,
+ &new_max_cache_size,
+ write_permitted);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "ageout code failed.")
+ }
+ }
+ }
break;
default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.")
}
}
@@ -4209,8 +4316,13 @@ H5C__auto_adjust_cache_size(H5F_t * f,
) {
/* move last epoch marker to the head of the LRU list */
- if(H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
+ result = H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "error cycling epoch marker.")
+ }
}
if ( ( status == increase ) || ( status == decrease ) ) {
@@ -4251,7 +4363,9 @@ H5C__auto_adjust_cache_size(H5F_t * f,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -4262,13 +4376,15 @@ H5C__auto_adjust_cache_size(H5F_t * f,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Unknown flash_incr_mode?!?!?.")
break;
}
}
}
if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
+
(*((cache_ptr->resize_ctl).rpt_fcn))
(cache_ptr,
H5C__CURR_AUTO_RESIZE_RPT_FCN_VER,
@@ -4280,17 +4396,14 @@ H5C__auto_adjust_cache_size(H5F_t * f,
new_min_clean_size);
}
- if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C_reset_cache_hit_rate_stats failed.")
+ }
done:
- /* Sanity checks */
- HDassert(cache_ptr->resize_in_progress);
- if(!reentrant_call)
- cache_ptr->resize_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
-
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__auto_adjust_cache_size() */
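Aside: for readers following the resize logic, here is a deliberately simplified sketch of the threshold-style decision this function drives. It is not the real H5C code path (which also applies the max_increment/max_decrement caps, the flash-increase path, and min_clean_size bookkeeping); it only uses fields of H5C_auto_size_ctl_t that are validated earlier in this file.

/* Simplified illustration only -- not the real resize path. */
static size_t
threshold_resize_sketch(double hit_rate, size_t cur_size,
                        const H5C_auto_size_ctl_t *ctl)
{
    size_t new_size = cur_size;

    if(hit_rate < ctl->lower_hr_threshold)        /* hit rate too low: grow */
        new_size = (size_t)((double)cur_size * ctl->increment);
    else if(hit_rate > ctl->upper_hr_threshold)   /* hit rate high: shrink */
        new_size = (size_t)((double)cur_size * ctl->decrement);

    /* clamp to the configured bounds */
    if(new_size > ctl->max_size)
        new_size = ctl->max_size;
    if(new_size < ctl->min_size)
        new_size = ctl->min_size;

    return new_size;
}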
@@ -4323,6 +4436,7 @@ H5C__autoadjust__ageout(H5F_t * f,
hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
+ herr_t result;
size_t test_size;
herr_t ret_value = SUCCEED; /* Return value */
@@ -4335,9 +4449,17 @@ H5C__autoadjust__ageout(H5F_t * f,
HDassert( ( new_max_cache_size_ptr ) && ( *new_max_cache_size_ptr == 0 ) );
/* remove excess epoch markers if any */
- if(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
- if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+ if ( cache_ptr->epoch_markers_active >
+ (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+ result = H5C__autoadjust__ageout__remove_excess_markers(cache_ptr);
+
+ if ( result != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "can't remove excess epoch markers.")
+ }
+ }
if ( ( (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out )
||
@@ -4353,7 +4475,7 @@ H5C__autoadjust__ageout(H5F_t * f,
/* evict aged out cache entries if appropriate... */
if(H5C__autoadjust__ageout__evict_aged_out_entries(f, dxpl_id, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries.")
/* ... and then reduce cache size if appropriate */
if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
@@ -4435,8 +4557,11 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if(cache_ptr->epoch_markers_active <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
+ if ( cache_ptr->epoch_markers_active <= 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "No active epoch markers on entry?!?!?.")
+ }
/* remove the last marker from both the ring buffer and the LRU list */
@@ -4448,10 +4573,15 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if(cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if((cache_ptr->epoch_marker_active)[i] != TRUE)
+ if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+ }
+
+ if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ }
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
(cache_ptr)->LRU_head_ptr, \
@@ -4464,9 +4594,9 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
* the ring buffer.
*/
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
+ HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
+ HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
cache_ptr->epoch_marker_ringbuf_last =
(cache_ptr->epoch_marker_ringbuf_last + 1) %
@@ -4476,8 +4606,10 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size += 1;
- if(cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
+ }
H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
(cache_ptr)->LRU_head_ptr, \
@@ -4539,6 +4671,19 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
+ * Changes: Modified function to detect deletions of entries
+ * during a scan of the LRU, and where appropriate,
+ * restart the scan to avoid proceeding with a next
+ * entry that is no longer in the cache.
+ *
+ * Note the absence of checks after flushes of clean
+ * entries. As a second entry can only be removed by
+ * a call to the pre_serialize or serialize callback
+ * of the first, and as these callbacks will not be called
+ * on clean entries, no checks are needed.
+ *
+ * JRM -- 4/6/15
+ *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -4630,26 +4775,39 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
}
- if(prev_ptr != NULL) {
+ if ( prev_ptr != NULL ) {
+
if(corked) /* dirty corked entry is skipped */
entry_ptr = prev_ptr;
- else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
- || (prev_ptr->next != next_ptr)
- || (prev_ptr->is_protected)
- || (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
+
+ else if ( ( restart_scan )
+ ||
+ ( prev_ptr->is_dirty != prev_is_dirty )
+ ||
+ ( prev_ptr->next != next_ptr )
+ ||
+ ( prev_ptr->is_protected )
+ ||
+ ( prev_ptr->is_pinned ) ) {
+
+ /* something has happened to the LRU -- start over
* from the tail.
*/
restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
+
+ } else {
+
entry_ptr = prev_ptr;
- } /* end if */
- else
+
+ }
+ } else {
+
entry_ptr = NULL;
+
+ }
} /* end while */
/* for now at least, don't bother to maintain the minimum clean size,
@@ -4667,9 +4825,9 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* entry).
*/
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
+ } else /* ! write_permitted */ {
+
+ /* since we are not allowed to write, all we can do is evict
* any clean entries that we may encounter before we either
* hit the eviction size limit, or encounter the epoch marker.
*
@@ -4682,19 +4840,23 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* performance implications, but it shouldn't cause any net
* slowdown.
*/
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+
+ HDassert( H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS );
+
entry_ptr = cache_ptr->LRU_tail_ptr;
- while(entry_ptr != NULL &&
- ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- HDassert(!(entry_ptr->is_protected));
+
+ while ( ( entry_ptr != NULL ) &&
+ ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
+ ( bytes_evicted < eviction_size_limit ) )
+ {
+ HDassert( ! (entry_ptr->is_protected) );
prev_ptr = entry_ptr->prev;
- if(!(entry_ptr->is_dirty)) {
+ if ( ! (entry_ptr->is_dirty) ) {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
- } /* end if */
+ }
/* just skip the entry if it is dirty, as we can't do
* anything with it now since we can't write.
*
@@ -4702,15 +4864,21 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* and thus we needn't test to see if the LRU has been changed
* out from under us.
*/
+
entry_ptr = prev_ptr;
+
} /* end while */
- } /* end else */
+ }
+
+ if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
- if(cache_ptr->index_size < cache_ptr->max_cache_size)
cache_ptr->cache_full = FALSE;
+ }
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
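Aside: the scan-restart logic above can be hard to follow inside the full function. The fragment below is a generic, self-contained sketch (hypothetical node_t/evict types, not the H5C API) of the same idea: record the predecessor's identity and state before evicting, and if a callback appears to have edited the list beyond the expected unlink, restart from the tail rather than follow a possibly stale pointer. It assumes the callback never frees the predecessor node itself; the real cache has additional bookkeeping for that case, and also stops on size limits and epoch markers, which are omitted here.

#include <stddef.h>

typedef struct node_t {
    struct node_t *prev;
    struct node_t *next;
    int            is_dirty;
} node_t;

/* Generic sketch of the LRU scan-restart pattern (illustrative only).
 * evict() is assumed to unlink its argument from the list; its callbacks
 * may also edit other nodes.
 */
static void
evict_from_tail(node_t **tail_ptr, void (*evict)(node_t *))
{
    node_t *entry = *tail_ptr;

    while(entry != NULL) {
        node_t *prev          = entry->prev;
        node_t *next          = entry->next;
        int     prev_is_dirty = (prev != NULL) ? prev->is_dirty : 0;

        evict(entry);                     /* unlinks entry; may edit the list */

        if(prev == NULL)
            entry = NULL;                 /* reached the head */
        else if(prev->is_dirty != prev_is_dirty || prev->next != next)
            entry = *tail_ptr;            /* list changed under us -- restart */
        else
            entry = prev;                 /* expected state: keep walking */
    }
}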
@@ -4739,16 +4907,23 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if(cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
+ if ( cache_ptr->epoch_markers_active >=
+ (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Already have a full complement of markers.")
+ }
/* find an unused marker */
i = 0;
- while((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
+ while ( ( (cache_ptr->epoch_marker_active)[i] ) &&
+ ( i < H5C__MAX_EPOCH_MARKERS ) )
+ {
i++;
+ }
if(i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker.")
HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
@@ -4766,7 +4941,7 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
}
H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
@@ -4825,11 +5000,15 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if(cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+ }
+
+ if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
- if((cache_ptr->epoch_marker_active)[i] != TRUE)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ }
/* remove the epoch marker from the LRU list */
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
@@ -4887,10 +5066,15 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if(cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
+ if ( cache_ptr->epoch_markers_active <=
+ (cache_ptr->resize_ctl).epochs_before_eviction ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry.")
+ }
- while(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
+ while ( cache_ptr->epoch_markers_active >
+ (cache_ptr->resize_ctl).epochs_before_eviction )
+ {
/* get the index of the last epoch marker in the LRU list
* and remove it from the ring buffer.
*/
@@ -4904,10 +5088,15 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if(cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if((cache_ptr->epoch_marker_active)[i] != TRUE)
+ if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
+ }
+
+ if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ }
/* remove the epoch marker from the LRU list */
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
@@ -4976,8 +5165,11 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
HDassert( new_entry_size > cache_ptr->flash_size_increase_threshold );
HDassert( old_entry_size < new_entry_size );
- if(old_entry_size >= new_entry_size)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
+ if ( old_entry_size >= new_entry_size ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "old_entry_size >= new_entry_size")
+ }
space_needed = new_entry_size - old_entry_size;
@@ -4990,7 +5182,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -5010,7 +5203,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Unknown flash_incr_mode?!?!?.")
break;
}
@@ -5039,7 +5233,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -5050,7 +5245,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Unknown flash_incr_mode?!?!?.")
break;
}
@@ -5064,8 +5260,10 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
/* get the hit rate for the reporting function. Should still
* be good as we haven't reset the hit rate statistics.
*/
- if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.")
+ }
(*((cache_ptr->resize_ctl).rpt_fcn))
(cache_ptr,
@@ -5078,9 +5276,12 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
new_min_clean_size);
}
- if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5C_reset_cache_hit_rate_stats failed.")
+ }
}
done:
@@ -5146,8 +5347,8 @@ H5C_flush_invalidate_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
#if H5C_DO_SANITY_CHECKS
{
int32_t i;
- uint32_t index_len = 0;
- uint32_t slist_len = 0;
+ int32_t index_len = 0;
+ int32_t slist_len = 0;
size_t index_size = (size_t)0;
size_t clean_index_size = (size_t)0;
size_t dirty_index_size = (size_t)0;
@@ -5182,7 +5383,7 @@ H5C_flush_invalidate_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
/* remove ageout markers if present */
if(cache_ptr->epoch_markers_active > 0)
if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
/* flush invalidate each ring, starting from the outermost ring and
* working inward.
@@ -5190,7 +5391,7 @@ H5C_flush_invalidate_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
if(H5C_flush_invalidate_ring(f, dxpl_id, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed.")
ring++;
} /* end while */
@@ -5278,7 +5479,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
{
H5C_t *cache_ptr;
hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
+ int32_t protected_entries = 0;
int32_t i;
int32_t cur_ring_pel_len;
int32_t old_ring_pel_len;
@@ -5288,7 +5489,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
H5C_cache_entry_t *entry_ptr = NULL;
H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
+ int64_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED;
@@ -5473,10 +5674,9 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
* everything we can before we flag an error.
*/
protected_entries++;
- } /* end if */
- else if(entry_ptr->is_pinned) {
+ } else if(entry_ptr->is_pinned) {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5490,10 +5690,10 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
cache_ptr->slist_changed = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
} /* end if */
- } /* end else-if */
+ } /* end if */
else {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5522,8 +5722,8 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*/
if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
- HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
+ HDassert(cache_ptr->slist_len == (initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert((int64_t)cache_ptr->slist_size == ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -5597,7 +5797,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
cache_ptr->entry_watched_for_removal = next_entry_ptr;
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
/* Restart the index list scan if necessary. Must
* do this if the next entry is evicted, and also if
@@ -5678,9 +5878,9 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
HDassert(protected_entries <= cache_ptr->pl_len);
if(protected_entries > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries.")
else if(cur_ring_pel_len > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5720,12 +5920,12 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
hbool_t ignore_protected;
hbool_t tried_to_flush_protected_entry = FALSE;
hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
+ int32_t protected_entries = 0;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
+ int64_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
int i;
@@ -5744,7 +5944,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
@@ -5908,7 +6108,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
} /* end if */
else {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5930,8 +6130,8 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((int64_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
} /* while */
@@ -5983,6 +6183,34 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
+ * Changes: Refactored function to remove the type_ptr parameter.
+ *
+ * JRM -- 8/7/14
+ *
+ * Added code to check for slist changes in pre_serialize and
+ * serialize calls, and set
+ * cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize as appropriate.
+ *
+ * JRM -- 12/13/14
+ *
+ * Refactored function to delay all modifications of the
+ * metadata cache data structures until after any calls
+ * to the pre-serialize or serialize callbacks.
+ *
+ * Need to do this, as some pre-serialize or serialize
+ * calls result in calls to the metadata cache and
+ * modifications to its data structures. Thus, at the
+ * time of any such call, the target entry flags and
+ * the metadata cache must all be consistent.
+ *
+ * Also added the entry_size_change_ptr parameter, which
+ * allows the function to report back any change in the size
+ * of the entry during the flush. Such size changes may
+ * occur during the pre-serialize callback.
+ *
+ * JRM -- 12/24/14
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6000,8 +6228,6 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
hbool_t destroy_entry; /* internal flag */
hbool_t generate_image; /* internal flag */
hbool_t was_dirty;
- hbool_t suppress_image_entry_writes = FALSE;
- hbool_t suppress_image_entry_frees = FALSE;
haddr_t entry_addr = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
@@ -6014,7 +6240,6 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
- HDassert(entry_ptr->type);
/* setup external flags from the flags parameter */
destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
@@ -6045,64 +6270,44 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
else
write_entry = FALSE;
- /* if we have received close warning, and we have been instructed to
- * generate a metadata cache image, and we have actually constructed
- * the entry images, set suppress_image_entry_frees to TRUE.
- *
- * Set suppress_image_entry_writes to TRUE if indicated by the
- * image_ctl flags.
- */
- if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image
- && cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries) {
- /* Sanity checks */
- HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
- HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
- HDassert((!clear_only) || !(entry_ptr->include_in_image));
- HDassert((!take_ownership) || !(entry_ptr->include_in_image));
- HDassert((!free_file_space) || !(entry_ptr->include_in_image));
-
- suppress_image_entry_frees = TRUE;
-
- if(cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
- suppress_image_entry_writes = TRUE;
- } /* end if */
-
/* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
if(entry_ptr->in_slist) {
HDassert(entry_ptr->is_dirty);
if((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
- } /* end if */
- else {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks.")
+ } else {
HDassert(!entry_ptr->is_dirty);
HDassert(!entry_ptr->flush_marker);
if((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
- } /* end else */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks.")
+ }
#endif /* H5C_DO_SANITY_CHECKS */
if(entry_ptr->is_protected) {
HDassert(!entry_ptr->is_protected);
/* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry.")
} /* end if */
- /* Set entry_ptr->flush_in_progress = TRUE and set
+ /* set entry_ptr->flush_in_progress = TRUE and set
* entry_ptr->flush_marker = FALSE
*
- * We will set flush_in_progress back to FALSE at the end if the
+ * in the parallel case, do some sanity checking in passing.
+ */
+ HDassert(entry_ptr->type);
+
+ was_dirty = entry_ptr->is_dirty; /* needed later for logging */
+
+ /* We will set flush_in_progress back to FALSE at the end if the
* entry still exists at that point.
*/
entry_ptr->flush_in_progress = TRUE;
entry_ptr->flush_marker = FALSE;
- /* Preserve current dirty state for later */
- was_dirty = entry_ptr->is_dirty;
-
/* The entry is dirty, and we are doing a flush, a flush destroy or have
* been requested to generate an image. In those cases, serialize the
* entry.
@@ -6119,9 +6324,6 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
} /* end if */
if(!(entry_ptr->image_up_to_date)) {
- /* Sanity check */
- HDassert(!entry_ptr->prefetched);
-
/* Generate the entry's image */
if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
@@ -6142,17 +6344,7 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
#endif /* H5C_DO_SANITY_CHECKS */
- /* Write the image to disk unless the write is suppressed.
- *
- * This happens if both suppress_image_entry_writes and
- * entry_ptr->include_in_image are TRUE, or if the
- * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
- * flag should only be used in test code
- */
- if((!suppress_image_entry_writes || !entry_ptr->include_in_image)
- && (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
- H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
-
+ if(((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0) {
#ifdef H5_HAVE_PARALLEL
if(cache_ptr->coll_write_list) {
if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
@@ -6160,16 +6352,8 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
} /* end if */
else
#endif /* H5_HAVE_PARALLEL */
-
- if(entry_ptr->prefetched) {
- HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
- } /* end if */
- else
- mem_type = entry_ptr->type->mem_type;
-
- if(H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, dxpl_id, entry_ptr->image_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
+ if(H5F_block_write(f, entry_ptr->type->mem_type, entry_ptr->addr, entry_ptr->size, dxpl_id, entry_ptr->image_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file.")
} /* end if */
/* if the entry has a notify callback, notify it that we have
@@ -6313,29 +6497,10 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
/* Sanity check */
HDassert(0 == entry_ptr->flush_dep_nparents);
- /* if both suppress_image_entry_frees and entry_ptr->include_in_image
- * are true, simply set entry_ptr->image_ptr to NULL, as we have
- * another pointer to the buffer in an instance of H5C_image_entry_t
- * in cache_ptr->image_entries.
- *
- * Otherwise, free the buffer if it exists.
- */
- if(suppress_image_entry_frees && entry_ptr->include_in_image)
- entry_ptr->image_ptr = NULL;
- else if(entry_ptr->image_ptr != NULL)
+ /* Start by freeing the buffer for the on disk image */
+ if(entry_ptr->image_ptr != NULL)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
- /* If the entry is not a prefetched entry, verify that the flush
- * dependency parents addresses array has been transferred.
- *
- * If the entry is prefetched, the free_icr routine will dispose of
- * the flush dependency parents addresses array if necessary.
- */
- if(!entry_ptr->prefetched) {
- HDassert(0 == entry_ptr->fd_parent_count);
- HDassert(NULL == entry_ptr->fd_parent_addrs);
- } /* end if */
-
/* Check whether we should free the space in the file that
* the entry occupies
*/
@@ -6419,7 +6584,7 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
HDassert(entry_ptr->image_ptr == NULL);
if(entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed.")
} /* end if */
else {
HDassert(take_ownership);
@@ -6434,11 +6599,12 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
if(cache_ptr->log_flush)
if((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed.")
done:
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( ! entry_ptr->flush_in_progress ) );
+
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( take_ownership ) || ( ! entry_ptr->is_dirty ) );
@@ -6496,14 +6662,14 @@ H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr,
/* Check if the amount of data to read will be past the EOA */
if(H5F_addr_gt((addr + *len), eoa)) {
if(actual)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA.")
else
/* Trim down the length of the metadata */
*len = (size_t)(eoa - addr);
} /* end if */
if(*len <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -6583,7 +6749,7 @@ H5C_load_entry(H5F_t * f,
/* Allocate the buffer for reading the on-disk entry image */
if(NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer.")
#if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
@@ -6660,7 +6826,7 @@ H5C_load_entry(H5F_t * f,
if(actual_len != len) {
/* Verify that the length isn't past the EOA for the file */
if(H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA.")
/* Expand buffer to new size */
if(NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
@@ -6810,22 +6976,6 @@ H5C_load_entry(H5F_t * f,
entry->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
- /* initialize cache image related fields */
- entry->include_in_image = FALSE;
- entry->lru_rank = 0;
- entry->image_dirty = FALSE;
- entry->fd_parent_count = 0;
- entry->fd_parent_addrs = NULL;
- entry->fd_child_count = 0;
- entry->fd_dirty_child_count = 0;
- entry->image_fd_height = 0;
- entry->prefetched = FALSE;
- entry->prefetch_type_id = 0;
- entry->age = 0;
-#ifndef NDEBUG /* debugging field */
- entry->serialization_count = 0;
-#endif /* NDEBUG */
-
H5C__RESET_CACHE_ENTRY_STATS(entry);
ret_value = thing;
@@ -6846,7 +6996,7 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5C__make_space_in_cache
+ * Function: H5C_make_space_in_cache
*
* Purpose: Attempt to evict cache entries until the index_size
* is at least needed_space below max_cache_size.
@@ -6877,19 +7027,53 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
+ * Changes: Modified function to skip over entries with the
+ * flush_in_progress flag set. If this is not done,
+ * an infinite recursion is possible if the cache is
+ * full, and the pre-serialize or serialize routine
+ * attempts to load another entry.
+ *
+ * This error was exposed by a re-factor of the
+ * H5C__flush_single_entry() routine. However, it was
+ * a potential bug from the moment that entries were
+ * allowed to load other entries on flush.
+ *
+ * In passing, note that the primary and secondary dxpls
+ * mentioned in the comment above have been replaced by
+ * a single dxpl at some point, and thus the discussion
+ * above is somewhat obsolete. Date of this change is
+ * unknown.
+ *
+ * JRM -- 12/26/14
+ *
+ * Modified function to detect deletions of entries
+ * during a scan of the LRU, and where appropriate,
+ * restart the scan to avoid proceeding with a next
+ * entry that is no longer in the cache.
+ *
+ * Note the absence of checks after flushes of clean
+ * entries. As a second entry can only be removed by
+ * a call to the pre_serialize or serialize callback
+ * of the first, and as these callbacks will not be called
+ * on clean entries, no checks are needed.
+ *
+ * JRM -- 4/6/15
+ *
*-------------------------------------------------------------------------
*/
-herr_t
-H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
- hbool_t write_permitted)
+static herr_t
+H5C_make_space_in_cache(H5F_t * f,
+ hid_t dxpl_id,
+ size_t space_needed,
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
int32_t clean_entries_skipped = 0;
int32_t total_entries_scanned = 0;
#endif /* H5C_COLLECT_CACHE_STATS */
- uint32_t entries_examined = 0;
- uint32_t initial_list_len;
+ int32_t entries_examined = 0;
+ int32_t initial_list_len;
size_t empty_space;
hbool_t prev_is_dirty = FALSE;
hbool_t didnt_flush_entry = FALSE;
@@ -6897,27 +7081,33 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * prev_ptr;
H5C_cache_entry_t * next_ptr;
- uint32_t num_corked_entries = 0;
+ int32_t num_corked_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_NOAPI_NOINIT
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ HDassert( f );
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->index_size ==
+ (cache_ptr->clean_index_size + cache_ptr->dirty_index_size) );
if ( write_permitted ) {
+
restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
- if(cache_ptr->index_size >= cache_ptr->max_cache_size)
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
+
empty_space = 0;
- else
+
+ } else {
+
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+ }
+
while ( ( ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size
@@ -7163,8 +7353,10 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
}
done:
+
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__make_space_in_cache() */
+
+} /* H5C_make_space_in_cache() */
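Aside: the log entry above mentions skipping entries whose flush is already in progress. The sketch below (hypothetical names and types, not the H5C API) shows the shape of that guard: an eviction candidate that is currently being flushed is passed over, so a pre-serialize/serialize callback that has to load another entry cannot recurse into flushing the same entry again.

#include <stddef.h>

typedef struct mini_entry {
    struct mini_entry *prev;
    int                flush_in_progress;
} mini_entry;

/* Illustrative only: walk the LRU from the tail, evicting entries until
 * `needed` bytes are reclaimed, but skip any entry already being flushed.
 */
static size_t
reclaim_space(mini_entry *lru_tail, size_t needed,
              size_t (*evict)(mini_entry *))
{
    size_t      reclaimed = 0;
    mini_entry *entry     = lru_tail;

    while(entry != NULL && reclaimed < needed) {
        mini_entry *prev = entry->prev;

        if(!entry->flush_in_progress)       /* the recursion guard */
            reclaimed += evict(entry);      /* may call back into the cache */

        entry = prev;
    }

    return reclaimed;
}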
/*-------------------------------------------------------------------------
@@ -7974,515 +8166,6 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
-{
-#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
-
-#if H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-
-#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issue), and more importantly, the
- * case in which any flush dependency parent is serialized more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
-
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while(ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch(ring) {
- case H5C_RING_USER:
- break;
-
- case H5C_RING_RDFSM:
- /* Settle raw data FSM */
- if(!cache_ptr->rdfsm_settled)
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
- break;
-
- case H5C_RING_MDFSM:
- /* Settle metadata FSM */
- if(!cache_ptr->mdfsm_settled)
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
- break;
-
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
-
- if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
-
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
-
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
-done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_cache() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
-{
- hbool_t done = FALSE;
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their unserialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modifications to
- * the basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in the
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behaviour. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non-issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush dependency
- * tree does not change beyond the removal of a leaf.
- */
- while(!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- /* Skip flush me last entries or inner ring entries */
- if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
-
- /* if we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if(!entry_ptr->image_up_to_date)
- done = FALSE;
-
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
-
- /* Check for the cache being perturbed during the entry serialize */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
-
-#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
-
-
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- if(entry_ptr->ring == ring) {
- if(entry_ptr->flush_me_last) {
- if(!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
-
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
-
-done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_ring() */
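For readers following the long algorithm comment at the top of H5C__serialize_ring(), the scan-restart shape is easier to see in isolation. Below is a minimal, self-contained sketch in plain C using invented toy types (toy_cache, toy_entry, toy_serialize are illustrative names, not HDF5 identifiers); it shows only the restart-on-perturbation pattern and omits rings, "flush me last" handling, and flush dependencies.

#include <stdbool.h>
#include <stddef.h>

/* Toy analogues of the cache index list -- not the HDF5 types. */
struct toy_entry {
    struct toy_entry *next;
    bool              serialized;   /* analogue of image_up_to_date */
};

struct toy_cache {
    struct toy_entry *head;
    unsigned          perturbed;    /* bumped when the list is modified */
};

/* Stand-in for the serialize callback.  A real callback might insert,
 * remove, or relocate entries and would then bump cache->perturbed. */
static void toy_serialize(struct toy_cache *cache, struct toy_entry *entry)
{
    (void)cache;
    entry->serialized = true;
}

/* Serialize every entry, restarting the scan from the head whenever a
 * callback perturbs the list -- the same shape as the loop above. */
static void toy_serialize_all(struct toy_cache *cache)
{
    bool done = false;

    while (!done) {
        struct toy_entry *entry = cache->head;

        cache->perturbed = 0;
        done = true;                       /* cleared if any work was found */

        while (entry != NULL) {
            if (!entry->serialized) {
                done = false;              /* force at least one more pass  */
                toy_serialize(cache, entry);
            }

            if (cache->perturbed > 0) {    /* list changed under us:        */
                cache->perturbed = 0;      /* restart from the head         */
                entry = cache->head;
            }
            else
                entry = entry->next;
        }
    }
}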
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
- H5C_cache_entry_t *entry_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
-
- /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
-
- /* Allocate buffer for the entry image if required. */
- if(NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
- /* Generate image for entry */
- if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
-
- /* Reset the flush_in_progress flag */
- entry_ptr->flush_in_progress = FALSE;
-
-done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_single_entry() */
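The image buffer handling in H5C__serialize_single_entry() above follows a common guard-byte pattern: allocate the on-disk image plus a few extra bytes and seed them with a known value so later checks can detect buffer overruns. A simplified, hypothetical sketch of that pattern (toy names and a fixed 8-byte guard; not the HDF5 code):

#include <stdlib.h>
#include <string.h>

#define TOY_IMAGE_EXTRA_SPACE 8   /* guard bytes, like H5C_IMAGE_EXTRA_SPACE */

static const unsigned char toy_sanity_value[TOY_IMAGE_EXTRA_SPACE] =
    { 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };

/* Allocate an image buffer of 'size' bytes plus a guard region, and seed
 * the guard with a known pattern so overruns can be detected later. */
static void *toy_alloc_image(size_t size)
{
    unsigned char *buf = malloc(size + TOY_IMAGE_EXTRA_SPACE);

    if (buf != NULL)
        memcpy(buf + size, toy_sanity_value, TOY_IMAGE_EXTRA_SPACE);

    return buf;
}

/* Later, a sanity check verifies the guard bytes are still intact. */
static int toy_image_guard_ok(const unsigned char *buf, size_t size)
{
    return memcmp(buf + size, toy_sanity_value, TOY_IMAGE_EXTRA_SPACE) == 0;
}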
-
-
-/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
@@ -8503,7 +8186,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
+static herr_t
H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id)
{
@@ -8513,18 +8196,10 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_STATIC
/* Sanity check */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(entry_ptr->type);
/* make note of the entry's current address */
old_addr = entry_ptr->addr;
@@ -8538,7 +8213,8 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
/* Check for any flags set in the pre-serialize callback */
if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
/* Check for unexpected flags from serialize callback */
- if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+ if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
+ H5C__SERIALIZE_MOVED_FLAG))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
@@ -8569,15 +8245,12 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
* tests will be necessary.
*/
if(cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case.")
#endif
/* If required, resize the buffer and update the entry and the cache
* data structures */
if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- /* Sanity check */
- HDassert(new_len > 0);
-
/* Allocate a new image buffer */
if(NULL == (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
@@ -8588,8 +8261,9 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
/* Update statistics for resizing the entry */
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, !(entry_ptr->is_dirty));
+ /* update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
+ new_len, entry_ptr, !(entry_ptr->is_dirty));
/* The entry can't be protected since we are in the process of
* flushing it. Thus we must update the replacement policy data
@@ -8602,11 +8276,10 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
* for the flush or flush destroy yet, the entry should
* be in the slist. Thus update it for the size change.
*/
- HDassert(entry_ptr->is_dirty);
HDassert(entry_ptr->in_slist);
H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
- /* Finally, update the entry for its new size */
+ /* finally, update the entry for its new size */
entry_ptr->size = new_len;
} /* end if */
@@ -8623,10 +8296,10 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
- /* Update the entry for its new address */
+ /* update the entry for its new address */
entry_ptr->addr = new_addr;
- /* And then reinsert in the index and slist */
+ /* and then reinsert in the index and slist */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
} /* end if */
@@ -8643,13 +8316,6 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE;
- /* Propagate the fact that the entry is serialized up the
- * flush dependency chain if appropriate. Since the image must
- * have been out of date for this function to have been called
- * (see assertion on entry), no need to check that -- only check
- * for flush dependency parents.
- */
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
if(entry_ptr->flush_dep_nparents > 0)
if(H5C__mark_flush_dep_serialized(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
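The H5C__generate_image() hunk above reacts to the RESIZED and MOVED flags returned by the pre-serialize callback: unknown flags are rejected, a resize grows the image buffer and updates the recorded size, and a move updates the recorded address. A rough sketch of that control flow, using invented toy types and flag values (not HDF5's), might look like:

#include <stdint.h>
#include <stdlib.h>

/* Toy flags in the spirit of H5C__SERIALIZE_RESIZED_FLAG /
 * H5C__SERIALIZE_MOVED_FLAG (the values here are made up). */
#define TOY_SERIALIZE_RESIZED 0x1u
#define TOY_SERIALIZE_MOVED   0x2u

struct toy_entry {
    uint64_t addr;        /* current on-disk address */
    size_t   size;        /* current image size      */
    void    *image;       /* on-disk image buffer    */
};

/* Hypothetical pre-serialize hook: may request a resize and/or a move by
 * setting bits in *flags and filling in *new_size / *new_addr. */
typedef int (*toy_pre_serialize_t)(struct toy_entry *entry, unsigned *flags,
                                   size_t *new_size, uint64_t *new_addr);

/* React to the flags the way the cache does: grow the buffer and update
 * the recorded size on a resize, update the recorded address on a move. */
static int toy_generate_image(struct toy_entry *entry, toy_pre_serialize_t pre)
{
    unsigned flags    = 0;
    size_t   new_size = entry->size;
    uint64_t new_addr = entry->addr;

    if (pre(entry, &flags, &new_size, &new_addr) < 0)
        return -1;

    if (flags & ~(TOY_SERIALIZE_RESIZED | TOY_SERIALIZE_MOVED))
        return -1;                       /* unknown flag -- reject */

    if (flags & TOY_SERIALIZE_RESIZED) {
        void *tmp = realloc(entry->image, new_size);
        if (tmp == NULL)
            return -1;
        entry->image = tmp;
        entry->size  = new_size;         /* hash table / skip list updates
                                          * would happen here in the cache */
    }

    if (flags & TOY_SERIALIZE_MOVED)
        entry->addr = new_addr;          /* re-index under the new address */

    /* ... the serialize callback would now fill entry->image ... */
    return 0;
}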
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index eb5f123..5697bff 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -35,7 +35,6 @@
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
-#include "H5ACprivate.h" /* Metadata Cache */
#include "H5Cpkg.h" /* Cache */
#include "H5Eprivate.h" /* Error Handling */
@@ -54,6 +53,10 @@
/* Local Prototypes */
/********************/
+#if 0 /* debugging routines */
+herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
+#endif /* debugging routines */
+
/*********************/
/* Package Variables */
@@ -101,7 +104,7 @@ H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name)
/* First, create a skip list */
if(NULL == (slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create skip list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create skip list.")
/* Next, scan the index, and insert all entries in the skip list.
* Do this, as we want to display cache entries in increasing address
@@ -191,7 +194,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-#ifndef NDEBUG
+#if 0 /* debugging routine */
herr_t
H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
{
@@ -200,14 +203,14 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
H5C_cache_entry_t * entry_ptr = NULL;
H5SL_node_t * node_ptr = NULL;
- FUNC_ENTER_NOAPI_NOERR
+ FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(calling_fcn != NULL);
HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn);
- HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len);
+ HDfprintf(stdout, " slist len = %d.\n", cache_ptr->slist_len);
HDfprintf(stdout, " slist size = %lld.\n", (long long)(cache_ptr->slist_size));
if(cache_ptr->slist_len > 0) {
@@ -237,9 +240,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
(int)(entry_ptr->is_dirty),
entry_ptr->type->name);
- HDfprintf(stdout, " node_ptr = 0x%llx, item = %p\n",
+ HDfprintf(stdout, " node_ptr = 0x%llx, item = 0x%llx\n",
(unsigned long long)node_ptr,
- H5SL_item(node_ptr));
+ (unsigned long long)H5SL_item(node_ptr));
/* increment node_ptr before we delete its target */
node_ptr = H5SL_next(node_ptr);
@@ -254,9 +257,10 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
HDfprintf(stdout, "\n\n");
+done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache_skip_list() */
-#endif /* NDEBUG */
+#endif /* debugging routine */
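H5C_dump_cache() (touched in the hunk above) builds a temporary skip list keyed on address so that entries print in increasing address order. The same idea in miniature, using a plain array and qsort instead of a skip list (toy types, illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
    uint64_t addr;
    size_t   size;
    int      is_dirty;
};

/* Order entry pointers by ascending file address. */
static int toy_cmp_addr(const void *a, const void *b)
{
    const struct toy_entry *ea = *(const struct toy_entry * const *)a;
    const struct toy_entry *eb = *(const struct toy_entry * const *)b;

    return (ea->addr > eb->addr) - (ea->addr < eb->addr);
}

/* Print entries in increasing address order. */
static void toy_dump(struct toy_entry **entries, size_t count)
{
    size_t i;

    qsort(entries, count, sizeof(entries[0]), toy_cmp_addr);

    for (i = 0; i < count; i++)
        printf("%zu: addr=0x%llx size=%zu dirty=%d\n", i,
               (unsigned long long)entries[i]->addr,
               entries[i]->size, entries[i]->is_dirty);
}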
/*-------------------------------------------------------------------------
@@ -281,7 +285,7 @@ H5C_set_prefix(H5C_t * cache_ptr, char * prefix)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) ||
(prefix == NULL) || (HDstrlen(prefix) >= H5C__PREFIX_LEN))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
HDstrncpy(&(cache_ptr->prefix[0]), prefix, (size_t)(H5C__PREFIX_LEN));
@@ -381,7 +385,6 @@ H5C_stats(H5C_t * cache_ptr,
size_t aggregate_max_size = 0;
int32_t aggregate_max_pins = 0;
double hit_rate;
- double prefetch_use_rate;
double average_successful_search_depth = 0.0f;
double average_failed_search_depth = 0.0f;
double average_entries_skipped_per_calls_to_msic = 0.0f;
@@ -487,12 +490,12 @@ H5C_stats(H5C_t * cache_ptr,
average_failed_search_depth);
HDfprintf(stdout,
- "%s current (max) index size / length = %ld (%ld) / %lu (%lu)\n",
+ "%s current (max) index size / length = %ld (%ld) / %ld (%ld)\n",
cache_ptr->prefix,
(long)(cache_ptr->index_size),
(long)(cache_ptr->max_index_size),
- (unsigned long)(cache_ptr->index_len),
- (unsigned long)(cache_ptr->max_index_len));
+ (long)(cache_ptr->index_len),
+ (long)(cache_ptr->max_index_len));
HDfprintf(stdout,
"%s current (max) clean/dirty idx size = %ld (%ld) / %ld (%ld)\n",
@@ -503,46 +506,46 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_dirty_index_size));
HDfprintf(stdout,
- "%s current (max) slist size / length = %ld (%ld) / %lu (%lu)\n",
+ "%s current (max) slist size / length = %ld (%ld) / %ld (%ld)\n",
cache_ptr->prefix,
(long)(cache_ptr->slist_size),
(long)(cache_ptr->max_slist_size),
- (unsigned long)(cache_ptr->slist_len),
- (unsigned long)(cache_ptr->max_slist_len));
+ (long)(cache_ptr->slist_len),
+ (long)(cache_ptr->max_slist_len));
HDfprintf(stdout,
- "%s current (max) PL size / length = %ld (%ld) / %lu (%lu)\n",
+ "%s current (max) PL size / length = %ld (%ld) / %ld (%ld)\n",
cache_ptr->prefix,
(long)(cache_ptr->pl_size),
(long)(cache_ptr->max_pl_size),
- (unsigned long)(cache_ptr->pl_len),
- (unsigned long)(cache_ptr->max_pl_len));
+ (long)(cache_ptr->pl_len),
+ (long)(cache_ptr->max_pl_len));
HDfprintf(stdout,
- "%s current (max) PEL size / length = %ld (%ld) / %lu (%lu)\n",
+ "%s current (max) PEL size / length = %ld (%ld) / %ld (%ld)\n",
cache_ptr->prefix,
(long)(cache_ptr->pel_size),
(long)(cache_ptr->max_pel_size),
- (unsigned long)(cache_ptr->pel_len),
- (unsigned long)(cache_ptr->max_pel_len));
+ (long)(cache_ptr->pel_len),
+ (long)(cache_ptr->max_pel_len));
HDfprintf(stdout,
- "%s current LRU list size / length = %ld / %lu\n",
+ "%s current LRU list size / length = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->LRU_list_size),
- (unsigned long)(cache_ptr->LRU_list_len));
+ (long)(cache_ptr->LRU_list_len));
HDfprintf(stdout,
- "%s current clean LRU size / length = %ld / %lu\n",
+ "%s current clean LRU size / length = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->cLRU_list_size),
- (unsigned long)(cache_ptr->cLRU_list_len));
+ (long)(cache_ptr->cLRU_list_len));
HDfprintf(stdout,
- "%s current dirty LRU size / length = %ld / %lu\n",
+ "%s current dirty LRU size / length = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->dLRU_list_size),
- (unsigned long)(cache_ptr->dLRU_list_len));
+ (long)(cache_ptr->dLRU_list_len));
HDfprintf(stdout,
"%s Total hits / misses / hit_rate = %ld / %ld / %f\n",
@@ -645,38 +648,6 @@ H5C_stats(H5C_t * cache_ptr,
(long long)(cache_ptr->LRU_scan_restarts),
(long long)(cache_ptr->index_scan_restarts));
- HDfprintf(stdout,
- "%s cache image creations/loads/size = %d / %d / %Hu\n",
- cache_ptr->prefix,
- cache_ptr->images_created,
- cache_ptr->images_loaded,
- cache_ptr->last_image_size);
-
- HDfprintf(stdout,
- "%s prefetches / dirty prefetches = %lld / %lld\n",
- cache_ptr->prefix,
- (long long)(cache_ptr->prefetches),
- (long long)(cache_ptr->dirty_prefetches));
-
- HDfprintf(stdout,
- "%s prefetch hits/flushes/evictions = %lld / %lld / %lld\n",
- cache_ptr->prefix,
- (long long)(cache_ptr->prefetch_hits),
- (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]),
- (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]));
-
- if(cache_ptr->prefetches > 0)
- prefetch_use_rate =
- (double)100.0f * ((double)(cache_ptr->prefetch_hits)) /
- ((double)(cache_ptr->prefetches));
- else
- prefetch_use_rate = 0.0f;
-
- HDfprintf(stdout,
- "%s prefetched entry use rate = %lf\n",
- cache_ptr->prefix,
- prefetch_use_rate);
-
#if H5C_COLLECT_CACHE_ENTRY_STATS
HDfprintf(stdout, "%s aggregate max / min accesses = %d / %d\n",
@@ -702,7 +673,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s Stats on %s:\n",
cache_ptr->prefix,
- ((cache_ptr->class_table_ptr))[i]->name);
+ ((cache_ptr->type_name_table_ptr))[i]);
if((cache_ptr->hits[i] > 0) || (cache_ptr->misses[i] > 0))
hit_rate = (double)100.0f * ((double)(cache_ptr->hits[i])) /
@@ -898,14 +869,6 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->LRU_scan_restarts = 0;
cache_ptr->index_scan_restarts = 0;
- cache_ptr->images_created = 0;
- cache_ptr->images_loaded = 0;
- cache_ptr->last_image_size = (hsize_t)0;
-
- cache_ptr->prefetches = 0;
- cache_ptr->dirty_prefetches = 0;
- cache_ptr->prefetch_hits = 0;
-
#if H5C_COLLECT_CACHE_ENTRY_STATS
for(i = 0; i <= cache_ptr->max_type_id; i++) {
cache_ptr->max_accesses[i] = 0;
@@ -989,303 +952,6 @@ H5C__dump_entry(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
if(entry_ptr->flush_dep_nchildren)
H5C__dump_children(cache_ptr, entry_ptr, FALSE, "Child", indent);
} /* end H5C__dump_entry() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C_flush_dependency_exists()
- *
- * Purpose: Test to see if a flush dependency relationship exists
- * between the supplied parent and child. Both parties
- * are indicated by addresses so as to avoid the necessity
- * of protect / unprotect calls prior to this call.
- *
- * If either the parent or the child is not in the metadata
- * cache, the function sets *fd_exists_ptr to FALSE.
- *
- * If both are in the cache, the child's list of parents is
- * searched for the proposed parent. If the proposed parent
- * is found in the child's parent list, the function sets
- * *fd_exists_ptr to TRUE. In all other non-error cases,
- * the function sets *fd_exists_ptr to FALSE.
- *
- * Return: SUCCEED on success/FAIL on failure. Note that
- * *fd_exists_ptr is undefined on failure.
- *
- * Programmer: John Mainzer
- * 9/28/16
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child_addr,
- hbool_t *fd_exists_ptr)
-{
- hbool_t fd_exists = FALSE; /* whether flush dependency exists */
- H5C_cache_entry_t * parent_ptr; /* Ptr to parent entry */
- H5C_cache_entry_t * child_ptr; /* Ptr to child entry */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(H5F_addr_defined(parent_addr));
- HDassert(H5F_addr_defined(child_addr));
- HDassert(fd_exists_ptr);
-
- H5C__SEARCH_INDEX(cache_ptr, parent_addr, parent_ptr, FAIL)
- H5C__SEARCH_INDEX(cache_ptr, child_addr, child_ptr, FAIL)
-
- if(parent_ptr && child_ptr) {
- HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(child_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- if(child_ptr->flush_dep_nparents > 0) {
- unsigned u; /* Local index variable */
-
- HDassert(child_ptr->flush_dep_parent);
- HDassert(child_ptr->flush_dep_parent_nalloc >= child_ptr->flush_dep_nparents);
-
- for(u = 0; u < child_ptr->flush_dep_nparents; u++) {
- if(child_ptr->flush_dep_parent[u] == parent_ptr) {
- fd_exists = TRUE;
- HDassert(parent_ptr->flush_dep_nchildren > 0);
- break;
- } /* end if */
- } /* end for */
- } /* end if */
- } /* end if */
-
- *fd_exists_ptr = fd_exists;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_flush_dependency_exists() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_validate_index_list
- *
- * Purpose: Debugging function that scans the index list for errors.
- *
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 9/16/16
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_validate_index_list(H5C_t *cache_ptr)
-{
- H5C_cache_entry_t * entry_ptr = NULL;
- uint32_t len = 0;
- int32_t index_ring_len[H5C_RING_NTYPES];
- size_t size = 0;
- size_t clean_size = 0;
- size_t dirty_size = 0;
- size_t index_ring_size[H5C_RING_NTYPES];
- size_t clean_index_ring_size[H5C_RING_NTYPES];
- size_t dirty_index_ring_size[H5C_RING_NTYPES];
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- for(i = 0; i < H5C_RING_NTYPES; i++) {
- index_ring_len[i] = 0;
- index_ring_size[i] = 0;
- clean_index_ring_size[i] = 0;
- dirty_index_ring_size[i] = 0;
- } /* end for */
-
- if(((cache_ptr->il_head == NULL) || (cache_ptr->il_tail == NULL))
- && (cache_ptr->il_head != cache_ptr->il_tail))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointer validation failed")
-
- if((cache_ptr->index_len == 1) && ((cache_ptr->il_head != cache_ptr->il_tail)
- || (cache_ptr->il_head == NULL) || (cache_ptr->il_head->size != cache_ptr->index_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointer sanity checks failed")
-
- if((cache_ptr->index_len >= 1)
- && ((cache_ptr->il_head == NULL)
- || (cache_ptr->il_head->il_prev != NULL)
- || (cache_ptr->il_tail == NULL)
- || (cache_ptr->il_tail->il_next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list length sanity checks failed")
-
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- if((entry_ptr != cache_ptr->il_head)
- && ((entry_ptr->il_prev == NULL) || (entry_ptr->il_prev->il_next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointers for entry are invalid")
-
- if((entry_ptr != cache_ptr->il_tail)
- && ((entry_ptr->il_next == NULL) || (entry_ptr->il_next->il_prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointers for entry are invalid")
-
- HDassert(entry_ptr->ring > 0);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
-
- len++;
- index_ring_len[entry_ptr->ring] += 1;
-
- size += entry_ptr->size;
- index_ring_size[entry_ptr->ring] += entry_ptr->size;
-
- if(entry_ptr->is_dirty) {
- dirty_size += entry_ptr->size;
- dirty_index_ring_size[entry_ptr->ring] += entry_ptr->size;
- } /* end if */
- else {
- clean_size += entry_ptr->size;
- clean_index_ring_size[entry_ptr->ring] += entry_ptr->size;
- } /* end else */
-
- entry_ptr = entry_ptr->il_next;
- } /* end while */
-
- if((cache_ptr->index_len != len) || (cache_ptr->il_len != len)
- || (cache_ptr->index_size != size) || (cache_ptr->il_size != size)
- || (cache_ptr->clean_index_size != clean_size)
- || (cache_ptr->dirty_index_size != dirty_size)
- || (clean_size + dirty_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index, clean and dirty sizes for cache are invalid")
-
- size = 0;
- clean_size = 0;
- dirty_size = 0;
- for(i = 0; i < H5C_RING_NTYPES; i++) {
- size += clean_index_ring_size[i] + dirty_index_ring_size[i];
- clean_size += clean_index_ring_size[i];
- dirty_size += dirty_index_ring_size[i];
- } /* end for */
-
- if((cache_ptr->index_size != size)
- || (cache_ptr->clean_index_size != clean_size)
- || (cache_ptr->dirty_index_size != dirty_size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index, clean and dirty sizes for cache are invalid")
-
-done:
- if(ret_value != SUCCEED)
- HDassert(0);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_validate_index_list() */
-#endif /* NDEBUG */
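The validation routine above re-derives the index list's length and size totals in a single walk and compares them against the cached bookkeeping fields. A minimal sketch of that cross-check, with toy types and the per-ring accounting omitted:

#include <stddef.h>

struct toy_entry {
    struct toy_entry *prev;
    struct toy_entry *next;
    size_t            size;
    int               is_dirty;
};

struct toy_index {
    struct toy_entry *head;
    struct toy_entry *tail;
    size_t            len;          /* cached length             */
    size_t            total_size;   /* cached clean + dirty size */
    size_t            dirty_size;   /* cached dirty size         */
};

/* Walk the list once, re-deriving length and sizes, and compare against
 * the cached fields.  Returns 0 on success, -1 if anything disagrees. */
static int toy_validate_index(const struct toy_index *idx)
{
    const struct toy_entry *e;
    size_t len = 0, size = 0, dirty = 0;

    for (e = idx->head; e != NULL; e = e->next) {
        /* forward and backward links must agree */
        if (e->prev != NULL && e->prev->next != e)
            return -1;
        if (e->next != NULL && e->next->prev != e)
            return -1;

        len  += 1;
        size += e->size;
        if (e->is_dirty)
            dirty += e->size;
    }

    if (len != idx->len || size != idx->total_size || dirty != idx->dirty_size)
        return -1;

    return 0;
}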
-
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_get_entry_ptr_from_addr()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, returns a pointer
- * to the entry in *entry_ptr_ptr. If the entry is not in the
- * cache, *entry_ptr_ptr is set to NULL.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * As heavy use of this function is almost certainly a
- * bad idea, the metadata cache tracks the number of
- * successful calls to this function, and (if
- * H5C_DO_SANITY_CHECKS is defined) displays any
- * non-zero count on cache shutdown.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr, void **entry_ptr_ptr)
-{
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(H5F_addr_defined(addr));
- HDassert(entry_ptr_ptr);
-
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
-
- if(entry_ptr == NULL)
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *entry_ptr_ptr = NULL;
- else {
- *entry_ptr_ptr = entry_ptr;
-
- /* increment call counter */
- (cache_ptr->get_entry_ptr_from_addr_counter)++;
- } /* end else */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_get_entry_ptr_from_addr() */
-#endif /* NDEBUG */
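The debugging helper above is essentially a hash-table lookup keyed on file address that reports NULL when the entry is not resident. A toy chained-hash version of the same lookup (invented names and a fixed bucket count; the real cache uses H5C__SEARCH_INDEX):

#include <stddef.h>
#include <stdint.h>

struct toy_entry {
    uint64_t          addr;
    struct toy_entry *hash_next;
};

#define TOY_HASH_BUCKETS 64u

/* Look an entry up by its file address in a chained hash table;
 * NULL means "not resident in the cache". */
static struct toy_entry *toy_lookup(struct toy_entry *buckets[TOY_HASH_BUCKETS],
                                    uint64_t addr)
{
    struct toy_entry *e = buckets[addr % TOY_HASH_BUCKETS];

    while (e != NULL && e->addr != addr)
        e = e->hash_next;

    return e;
}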
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C_get_serialization_in_progress
- *
- * Purpose: Return the current value of
- * cache_ptr->serialization_in_progress.
- *
- * Return: Current value of cache_ptr->serialization_in_progress.
- *
- * Programmer: John Mainzer
- * 8/24/15
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-hbool_t
-H5C_get_serialization_in_progress(const H5C_t *cache_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity check */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- FUNC_LEAVE_NOAPI(cache_ptr->serialization_in_progress)
-} /* H5C_get_serialization_in_progress() */
-#endif /* NDEBUG */
-
-
/*-------------------------------------------------------------------------
*
* Function: H5C_cache_is_clean()
@@ -1320,79 +986,12 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring)
while(ring <= inner_ring) {
if(cache_ptr->dirty_index_ring_size[ring] > 0)
- HGOTO_DONE(FALSE)
+ ret_value = FALSE;
ring++;
} /* end while */
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_cache_is_clean() */
#endif /* NDEBUG */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_verify_entry_type()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, test to see if its
- * type field contains the expected value.
- *
- * If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
- * on whether the entries type field matches the expected_type
- * parameter.
- *
- * If the target entry is not in cache, *in_cache_ptr is
- * set to FALSE, and *type_ok_ptr is undefined.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr,
- const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
- hbool_t *type_ok_ptr)
-{
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(H5F_addr_defined(addr));
- HDassert(expected_type);
- HDassert(in_cache_ptr);
- HDassert(type_ok_ptr);
-
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
-
- if(entry_ptr == NULL)
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *in_cache_ptr = FALSE;
- else {
- *in_cache_ptr = TRUE;
-
- if(entry_ptr->prefetched)
- *type_ok_ptr = (expected_type->id == entry_ptr->prefetch_type_id);
- else
- *type_ok_ptr = (expected_type == entry_ptr->type);
- } /* end else */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_verify_entry_type() */
-#endif /* NDEBUG */
-
diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c
index 655d795..e576028 100644
--- a/src/H5Cepoch.c
+++ b/src/H5Cepoch.c
@@ -92,7 +92,8 @@ static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing,
/*******************/
-const H5AC_class_t H5AC_EPOCH_MARKER[1] = {{
+const H5C_class_t H5C__epoch_marker_class =
+{
/* id = */ H5AC_EPOCH_MARKER_ID,
/* name = */ "epoch marker",
/* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
@@ -107,7 +108,7 @@ const H5AC_class_t H5AC_EPOCH_MARKER[1] = {{
/* notify = */ H5C__epoch_marker_notify,
/* free_icr = */ H5C__epoch_marker_free_icr,
/* fsf_size = */ H5C__epoch_marker_fsf_size,
-}};
+};
/***************************************************************************
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index ebb98b3..ab94879 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -164,13 +164,38 @@ static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: Ported code to detect next entry status changes as
+ * the result of a flush from the serial code in the scan of
+ * the LRU. Also added code to detect and adapt to the
+ * removal from the cache of the next entry in the scan of
+ * the LRU.
+ *
+ * Note that at present, all of these changes should not
+ * be required as the operations on entries as they are
+ * flushed that can cause these conditions are not permitted
+ * in the parallel case. However, Quincey indicates that
+ * this may change, and thus has requested the modification.
+ *
+ * Note the assert(FALSE) in the if statement whose body
+ * restarts the scan of the LRU. As the body of the if
+ * statement should be unreachable, it should never be
+ * triggered until the constraints on the parallel case
+ * are relaxed. Please remove the assertion at that time.
+ *
+ * Also added warning on the Pinned Entry List scan, as it
+ * is potentially subject to the same issue. As there is
+ * no cognate of this scan in the serial code, I don't have
+ * a fix to port to it.
+ *
+ * JRM -- 4/10/19
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C_apply_candidate_list(H5F_t * f,
hid_t dxpl_id,
H5C_t * cache_ptr,
- unsigned num_candidates,
+ int num_candidates,
haddr_t * candidates_list_ptr,
int mpi_rank,
int mpi_size)
@@ -180,19 +205,19 @@ H5C_apply_candidate_list(H5F_t * f,
int i;
int m;
int n;
- unsigned first_entry_to_flush;
- unsigned last_entry_to_flush;
- unsigned entries_to_clear = 0;
- unsigned entries_to_flush = 0;
- unsigned entries_to_flush_or_clear_last = 0;
- unsigned entries_to_flush_collectively = 0;
- unsigned entries_cleared = 0;
- unsigned entries_flushed = 0;
- unsigned entries_delayed = 0;
- unsigned entries_flushed_or_cleared_last = 0;
- unsigned entries_flushed_collectively = 0;
- unsigned entries_examined = 0;
- unsigned initial_list_len;
+ int first_entry_to_flush;
+ int last_entry_to_flush;
+ int entries_to_clear = 0;
+ int entries_to_flush = 0;
+ int entries_to_flush_or_clear_last = 0;
+ int entries_to_flush_collectively = 0;
+ int entries_cleared = 0;
+ int entries_flushed = 0;
+ int entries_delayed = 0;
+ int entries_flushed_or_cleared_last = 0;
+ int entries_flushed_collectively = 0;
+ int entries_examined = 0;
+ int initial_list_len;
int * candidate_assignment_table = NULL;
haddr_t addr;
H5C_cache_entry_t * clear_ptr = NULL;
@@ -206,30 +231,29 @@ H5C_apply_candidate_list(H5F_t * f,
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(cache_ptr != NULL);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(num_candidates > 0);
- HDassert(num_candidates <= cache_ptr->slist_len);
- HDassert(candidates_list_ptr != NULL);
- HDassert(0 <= mpi_rank);
- HDassert(mpi_rank < mpi_size);
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( num_candidates > 0 );
+ HDassert( num_candidates <= cache_ptr->slist_len );
+ HDassert( candidates_list_ptr != NULL );
+ HDassert( 0 <= mpi_rank );
+ HDassert( mpi_rank < mpi_size );
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n", FUNC, mpi_rank);
-
- HDmemset(tbl_buf, 0, sizeof(tbl_buf));
-
+ HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n",
+ FUNC, mpi_rank);
+ for ( i = 0; i < 1024; i++ ) tbl_buf[i] = '\0';
sprintf(&(tbl_buf[0]), "candidate list = ");
- for(u = 0; u < num_candidates; u++)
- sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " 0x%llx", (long long)(*(candidates_list_ptr + u)));
+ for ( i = 0; i < num_candidates; i++ )
+ {
+ sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " 0x%llx",
+ (long long)(*(candidates_list_ptr + i)));
+ }
sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
-
HDfprintf(stdout, "%s", tbl_buf);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
@@ -245,6 +269,7 @@ H5C_apply_candidate_list(H5F_t * f,
n = num_candidates / mpi_size;
m = num_candidates % mpi_size;
HDassert(n >= 0);
+
if(NULL == (candidate_assignment_table = (int *)H5MM_malloc(sizeof(int) * (size_t)(mpi_size + 1))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for candidate assignment table")
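The sanity check a little further down verifies that the candidate assignment table splits the candidate list as evenly as possible across ranks: each rank gets num/size candidates, with the first num%size ranks getting one extra. A toy routine that builds such a table (hypothetical names, simplified error handling; not the HDF5 implementation):

#include <stdlib.h>

/* Build boundaries that split 'num_candidates' entries across 'mpi_size'
 * ranks as evenly as possible.  table[r] .. table[r+1]-1 belong to rank r,
 * and table[mpi_size] == num_candidates. */
static int *toy_build_assignment_table(int num_candidates, int mpi_size)
{
    int  n, m, r;
    int *table;

    if (mpi_size <= 0 || num_candidates < 0)
        return NULL;

    n = num_candidates / mpi_size;        /* base share per rank  */
    m = num_candidates % mpi_size;        /* ranks with one extra */

    if ((table = malloc(sizeof(int) * (size_t)(mpi_size + 1))) == NULL)
        return NULL;

    table[0] = 0;
    for (r = 1; r <= mpi_size; r++)
        table[r] = table[r - 1] + n + (r <= m ? 1 : 0);

    return table;                         /* table[mpi_size] == num_candidates */
}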
@@ -272,8 +297,9 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
#if H5C_DO_SANITY_CHECKS
- /* Verify that the candidate assignment table has the expected form */
- for(i = 1; i < mpi_size - 1; i++) {
+ /* verify that the candidate assignment table has the expected form */
+ for ( i = 1; i < mpi_size - 1; i++ )
+ {
int a, b;
a = candidate_assignment_table[i] - candidate_assignment_table[i - 1];
@@ -297,71 +323,73 @@ H5C_apply_candidate_list(H5F_t * f,
sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
HDfprintf(stdout, "%s", tbl_buf);
- HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n",
+ HDfprintf(stdout, "%s:%d: flush entries [%d, %d].\n",
FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush);
HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- for(u = 0; u < num_candidates; u++) {
- addr = candidates_list_ptr[u];
- HDassert(H5F_addr_defined(addr));
+ for(i = 0; i < num_candidates; i++) {
+ addr = candidates_list_ptr[i];
+ HDassert( H5F_addr_defined(addr) );
#if H5C_DO_SANITY_CHECKS
- if(u > 0) {
- if(last_addr == addr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "duplicate entry in cleaned list")
- else if(last_addr > addr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted")
- } /* end if */
+ if ( i > 0 ) {
+ if ( last_addr == addr ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list.\n")
+ } else if ( last_addr > addr ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted.\n")
+ }
+ }
last_addr = addr;
#endif /* H5C_DO_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "listed candidate entry not in cache?!?!?")
- if(!entry_ptr->is_dirty)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
- if(entry_ptr->is_protected)
+ if(entry_ptr == NULL) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed candidate entry not in cache?!?!?.")
+ } else if(!entry_ptr->is_dirty) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?.")
+ } else if ( entry_ptr->is_protected ) {
/* For now at least, we can't deal with protected entries.
* If we encounter one, scream and die. If it becomes an
* issue, we should be able to work around this.
*/
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?")
-
- /* Determine whether the entry is to be cleared or flushed,
- * and mark it accordingly. We will scan the protected and
- * pinned list shortly, and clear or flush according to these
- * markings.
- */
- if(u >= first_entry_to_flush && u <= last_entry_to_flush) {
- entries_to_flush++;
- entry_ptr->flush_immediately = TRUE;
- } /* end if */
- else {
- entries_to_clear++;
- entry_ptr->clear_on_unprotect = TRUE;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?.")
+ } else {
+ /* determine whether the entry is to be cleared or flushed,
+ * and mark it accordingly. We will scan the protected and
+ * pinned list shortly, and clear or flush according to these
+ * markings.
+ */
+ if((i >= first_entry_to_flush) && (i <= last_entry_to_flush)) {
+ entries_to_flush++;
+ entry_ptr->flush_immediately = TRUE;
+ } /* end if */
+ else {
+ entries_to_clear++;
+ entry_ptr->clear_on_unprotect = TRUE;
+ } /* end else */
+
+ /* Entries that are marked as collectively accessed and are in
+ the candidate list to be cleared from the cache have to be
+ removed from the coll list. This is OK since the
+ candidate list is collective and uniform across all
+ ranks. */
+ if(TRUE == entry_ptr->coll_access) {
+ entry_ptr->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
} /* end else */
-
- /* Entries that are marked as collectively accessed and are in
- * the candidate list to be cleared from the cache have to be
- * removed from the coll list. This is OK since the
- * candidate list is collective and uniform across all
- * ranks.
- */
- if(entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
} /* end for */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n",
- FUNC, mpi_rank, num_candidates, entries_to_clear,
- entries_to_flush);
+ HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %d/%d/%d.\n",
+ FUNC, mpi_rank, (int)num_candidates, (int)entries_to_clear,
+ (int)entries_to_flush);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
/* We have now marked all the entries on the candidate list for
* either flush or clear -- now scan the LRU and the pinned list
* for these entries and do the deed.
@@ -558,7 +586,7 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end while */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %u/%u/%u.\n",
+ HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %d/%d/%d.\n",
FUNC, mpi_rank, entries_examined,
entries_cleared, entries_flushed);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
@@ -685,7 +713,7 @@ H5C_apply_candidate_list(H5F_t * f,
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
HDfprintf(stdout,
- "%s:%d: pel entries examined/cleared/flushed = %u/%u/%u.\n",
+ "%s:%d: pel entries examined/cleared/flushed = %d/%d/%d.\n",
FUNC, mpi_rank, entries_examined,
entries_cleared, entries_flushed);
HDfprintf(stdout, "%s:%d: done.\n", FUNC, mpi_rank);
@@ -730,7 +758,7 @@ H5C_apply_candidate_list(H5F_t * f,
/* Write collective list */
if(H5C__collective_write(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "can't write metadata collectively")
+ HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
} /* end if */
/* ====================================================================== *
@@ -746,11 +774,12 @@ H5C_apply_candidate_list(H5F_t * f,
(entries_cleared != entries_to_clear) ||
(entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
(entries_flushed_collectively != entries_to_flush_collectively))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
done:
if(candidate_assignment_table != NULL)
candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
+
if(cache_ptr->coll_write_list) {
if(H5SL_close(cache_ptr->coll_write_list) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
@@ -807,7 +836,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
if(space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- unsigned nominated_entries_count = 0;
+ int nominated_entries_count = 0;
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
@@ -828,7 +857,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(1).")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -852,7 +881,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(2).")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -921,7 +950,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
if(space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- unsigned nominated_entries_count = 0;
+ int nominated_entries_count = 0;
size_t nominated_entries_size = 0;
HDassert( cache_ptr->slist_len > 0 );
@@ -944,7 +973,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed.")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -988,28 +1017,51 @@ done:
* Programmer: John Mainzer
* 7/5/05
*
+ * Changes: Tidied up code, removing some old commented out
+ * code that had been left in pending success of the
+ * new version.
+ *
+ * Note that unlike H5C_apply_candidate_list(),
+ * H5C_mark_entries_as_clean() makes all its calls to
+ * H5C__flush_single_entry() with the
+ * H5C__FLUSH_CLEAR_ONLY_FLAG set. As a result,
+ * the pre_serialize() and serialize calls are not made.
+ *
+ * This then implies that (assuming such actions were
+ * permitted in the parallel case) no loads, dirties,
+ * resizes, or removals of other entries can occur as
+ * a side effect of the flush. Hence, there is no need
+ * for the checks for entry removal / status change
+ * that I ported to H5C_apply_candidate_list().
+ *
+ * However, if (in addition to allowing such operations
+ * in the parallel case), we allow such operations outside
+ * of the pre_serialize / serialize routines, this may
+ * cease to be the case -- requiring a review of this
+ * function.
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C_mark_entries_as_clean(H5F_t * f,
hid_t dxpl_id,
- unsigned ce_array_len,
+ int32_t ce_array_len,
haddr_t * ce_array_ptr)
{
H5C_t * cache_ptr;
- unsigned entries_cleared;
- unsigned entries_examined;
- unsigned initial_list_len;
+ int entries_cleared;
+ int entries_examined;
+ int i;
+ int initial_list_len;
haddr_t addr;
#if H5C_DO_SANITY_CHECKS
- unsigned pinned_entries_marked = 0;
- unsigned protected_entries_marked = 0;
- unsigned other_entries_marked = 0;
+ int pinned_entries_marked = 0;
+ int protected_entries_marked = 0;
+ int other_entries_marked = 0;
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
H5C_cache_entry_t * clear_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
- unsigned u;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1024,30 +1076,46 @@ H5C_mark_entries_as_clean(H5F_t * f,
HDassert( ce_array_ptr != NULL );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0 ||
- H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- for(u = 0; u < ce_array_len; u++) {
- addr = ce_array_ptr[u];
+ for ( i = 0; i < ce_array_len; i++ )
+ {
+ addr = ce_array_ptr[i];
#if H5C_DO_SANITY_CHECKS
- if(u == 0)
+ if ( i == 0 ) {
+
last_addr = addr;
- else {
- if(last_addr == addr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list")
- if(last_addr > addr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cleaned list not sorted")
- } /* end else */
+
+ } else {
+
+ if ( last_addr == addr ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Duplicate entry in cleaned list.\n");
+
+ } else if ( last_addr > addr ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "cleaned list not sorted.\n");
+ }
+ }
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0
- || H5C_validate_pinned_entry_list(cache_ptr) < 0
- || H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed in for loop")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed in for loop.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1055,24 +1123,28 @@ H5C_mark_entries_as_clean(H5F_t * f,
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL) {
+ if ( entry_ptr == NULL ) {
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry[%u] = %a not in cache.\n",
- u,
- addr);
+ "H5C_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
+ (int)i,
+ (long)addr);
#endif /* H5C_DO_SANITY_CHECKS */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not in cache?!?!?")
- } /* end if */
- else if(!entry_ptr->is_dirty) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Listed entry not in cache?!?!?.")
+
+ } else if ( ! entry_ptr->is_dirty ) {
+
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry %a is not dirty!?!\n",
- addr);
+ "H5C_mark_entries_as_clean: entry %ld is not dirty!?!\n",
+ (long)addr);
#endif /* H5C_DO_SANITY_CHECKS */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
- } /* end else-if */
- else {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Listed entry not dirty?!?!?.")
+
+ } else {
+
/* Mark the entry to be cleared on unprotect. We will
* scan the LRU list shortly, and clear all those entries
* not currently protected.
@@ -1128,25 +1200,31 @@ H5C_mark_entries_as_clean(H5F_t * f,
* point.
* JRM -- 4/7/15
*/
+
entries_cleared = 0;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
- while(entry_ptr != NULL && entries_examined <= initial_list_len &&
- entries_cleared < ce_array_len) {
- if(entry_ptr->clear_on_unprotect) {
+
+ while ( ( entry_ptr != NULL ) &&
+ ( entries_examined <= initial_list_len ) &&
+ ( entries_cleared < ce_array_len ) )
+ {
+ if ( entry_ptr->clear_on_unprotect ) {
+
entry_ptr->clear_on_unprotect = FALSE;
clear_ptr = entry_ptr;
entry_ptr = entry_ptr->prev;
entries_cleared++;
if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
- } /* end if */
- else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
+ } else {
+
entry_ptr = entry_ptr->prev;
+ }
entries_examined++;
- } /* end while */
+ }
#if H5C_DO_SANITY_CHECKS
HDassert( entries_cleared == other_entries_marked );
@@ -1155,20 +1233,25 @@ H5C_mark_entries_as_clean(H5F_t * f,
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
*/
+
entry_ptr = cache_ptr->pel_head_ptr;
- while(entry_ptr != NULL) {
- if(entry_ptr->clear_on_unprotect) {
+
+ while ( entry_ptr != NULL )
+ {
+ if ( entry_ptr->clear_on_unprotect ) {
+
entry_ptr->clear_on_unprotect = FALSE;
clear_ptr = entry_ptr;
entry_ptr = entry_ptr->next;
entries_cleared++;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry")
- } /* end if */
- else
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
+ } else {
+
entry_ptr = entry_ptr->next;
- } /* end while */
+ }
+ }
#if H5C_DO_SANITY_CHECKS
HDassert( entries_cleared == pinned_entries_marked + other_entries_marked );
@@ -1179,28 +1262,33 @@ H5C_mark_entries_as_clean(H5F_t * f,
( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
#if H5C_DO_SANITY_CHECKS
- u = 0;
+ i = 0;
entry_ptr = cache_ptr->pl_head_ptr;
while ( entry_ptr != NULL )
{
if ( entry_ptr->clear_on_unprotect ) {
- u++;
+ i++;
}
entry_ptr = entry_ptr->next;
}
- HDassert( (entries_cleared + u) == ce_array_len );
+ HDassert( (entries_cleared + i) == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0
- || H5C_validate_pinned_entry_list(cache_ptr) < 0
- || H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit.\n");
+ }
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_mark_entries_as_clean() */
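H5C_mark_entries_as_clean() above is a two-phase mark-then-sweep routine: first each listed entry is marked clear_on_unprotect, then the LRU (and afterwards the pinned list) is walked and every marked entry is cleared. A compact sketch of the sweep phase with toy types (toy_clear_entry stands in for the clear-only flush; not the HDF5 code):

#include <stdbool.h>
#include <stddef.h>

struct toy_entry {
    struct toy_entry *prev;
    struct toy_entry *next;
    bool              clear_on_unprotect;   /* the "mark" */
    bool              is_dirty;
};

/* Hypothetical stand-in for the clear-only flush of one entry. */
static void toy_clear_entry(struct toy_entry *e)
{
    e->is_dirty = false;
}

/* Sweep the LRU from the tail, clearing every marked entry.  The prev
 * pointer is saved before the clear so the walk survives any unlinking
 * a real flush would do. */
static size_t toy_sweep_lru(struct toy_entry *lru_tail)
{
    struct toy_entry *e = lru_tail;
    size_t            cleared = 0;

    while (e != NULL) {
        struct toy_entry *prev = e->prev;   /* grab before clearing */

        if (e->clear_on_unprotect) {
            e->clear_on_unprotect = false;
            toy_clear_entry(e);
            cleared++;
        }
        e = prev;
    }

    return cleared;
}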
@@ -1221,7 +1309,7 @@ done:
herr_t
H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- uint32_t clear_cnt;
+ int32_t clear_cnt;
H5C_cache_entry_t * entry_ptr = NULL;
herr_t ret_value = SUCCEED;
@@ -1411,3 +1499,4 @@ done:
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5C__collective_write() */
#endif /* H5_HAVE_PARALLEL */
+
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 5b923e9..6e37bca 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -209,6 +209,7 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
+ ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (head_ptr) != (tail_ptr) ) || \
( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
@@ -374,6 +375,7 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
+ ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
@@ -481,7 +483,7 @@ if ( ( (hd_ptr) == NULL ) || \
) \
) \
) { \
- HDassert(0 && "il DLL pre remove SC failed"); \
+ HDassert(0 && "il DLL pre remove SC failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed") \
}
@@ -492,6 +494,7 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
+ ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
@@ -503,7 +506,7 @@ if ( ( (entry_ptr) == NULL ) || \
) \
) \
) { \
- HDassert(0 && "IL DLL pre insert SC failed"); \
+ HDassert(0 && "IL DLL pre insert SC failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed") \
}
@@ -511,6 +514,7 @@ if ( ( (entry_ptr) == NULL ) || \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
+ ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (head_ptr) != (tail_ptr) ) || \
( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
@@ -522,7 +526,7 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
) \
) \
) { \
- HDassert(0 && "IL DLL sanity check failed"); \
+ HDassert(0 && "IL DLL sanity check failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL sanity check failed") \
}
@@ -595,6 +599,23 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
* H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as
* the cache hit rate stats are always collected and available.
*
+ * Changes:
+ *
+ * JRM -- 3/21/06
+ * Added / updated macros for pinned entry related stats.
+ *
+ * JRM -- 8/9/06
+ * More pinned entry stats related updates.
+ *
+ * JRM -- 3/31/07
+ * Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on
+ * read and write protects.
+ *
+ * MAM -- 1/15/09
+ * Created H5C__UPDATE_MAX_INDEX_SIZE_STATS to contain
+ * common code within macros that update the maximum
+ * index, clean_index, and dirty_index statistics fields.
+ *
***********************************************************************/
#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
@@ -681,31 +702,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \
((cache_ptr)->index_scan_restarts)++;
-#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \
-{ \
- (cache_ptr)->images_created++; \
-}
-
-#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \
-{ \
- /* make sure image len is still good */ \
- HDassert((cache_ptr)->image_len > 0); \
- (cache_ptr)->images_loaded++; \
- (cache_ptr)->last_image_size = (cache_ptr)->image_len; \
-}
-
-#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \
-{ \
- (cache_ptr)->prefetches++; \
- if ( dirty ) \
- (cache_ptr)->dirty_prefetches++; \
-}
-
-#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \
-{ \
- (cache_ptr)->prefetch_hits++; \
-}
-
#if H5C_COLLECT_CACHE_ENTRY_STATS
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
@@ -930,10 +926,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
-#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
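/* A minimal sketch (hypothetical names, not the library's actual macros) of
 * the pattern above: when stats collection is compiled out, the statistics
 * macros expand to nothing, while the hit-rate bookkeeping stays active
 * because cache_hits and cache_accesses are always maintained.
 */
#include <stdint.h>

struct sketch_stats {
    int64_t cache_hits;
    int64_t cache_accesses;
    int64_t evictions;
};

#define SKETCH_COLLECT_STATS 1

#if SKETCH_COLLECT_STATS
#define SKETCH_UPDATE_STATS_FOR_EVICTION(s) ((s)->evictions++)
#else
#define SKETCH_UPDATE_STATS_FOR_EVICTION(s)  /* expands to nothing */
#endif

/* Always active, since the hit rate must always be available: */
#define SKETCH_UPDATE_HIT_RATE_STATS(s, hit) \
do {                                         \
    (s)->cache_accesses++;                   \
    if (hit)                                 \
        (s)->cache_hits++;                   \
} while (0)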
@@ -1007,7 +999,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Pre HT insert SC failed") \
}
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
@@ -1029,7 +1022,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1070,7 +1064,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
}
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1096,7 +1090,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
}
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
@@ -1108,7 +1102,7 @@ if ( ( (cache_ptr) == NULL ) || \
( ! H5F_addr_defined(Addr) ) || \
( H5C__HASH_FCN(Addr) < 0 ) || \
( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
@@ -1130,7 +1124,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
( ( (entry_ptr)->ht_next != NULL ) && \
( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post successful HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
@@ -1138,7 +1133,8 @@ if ( ( (cache_ptr) == NULL ) || \
if ( ( (cache_ptr) == NULL ) || \
( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
( (entry_ptr)->ht_prev != NULL ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post HT shift to front SC failed") \
}
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1173,7 +1169,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Pre HT entry size change SC failed") \
}
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1203,7 +1200,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Post HT entry size change SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1230,7 +1228,8 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Pre HT update for entry clean SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1257,7 +1256,8 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Pre HT update for entry dirty SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1273,7 +1273,8 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Post HT update for entry clean SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1289,7 +1290,8 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Post HT update for entry dirty SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -1590,7 +1592,8 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1625,7 +1628,8 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1677,7 +1681,8 @@ if ( ( (cache_ptr)->index_size != \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
!= (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "Can't delete entry from skip list.") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
if(!(during_flush)) \
@@ -1714,7 +1719,8 @@ if ( ( (cache_ptr)->index_size != \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
!= (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "Can't delete entry from skip list.") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
if(!(during_flush)) \
@@ -2233,120 +2239,6 @@ if ( ( (cache_ptr)->index_size != \
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
- *
- * Purpose: Update the replacement policy data structures for an
- * insertion of the specified cache entry.
- *
- * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
- * new entry as the LEAST recently used entry, not the
- * most recently used.
- *
- * For now at least, this macro should only be used in
- * the reconstruction of the metadata cache from a cache
- * image block.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
- *
- * Programmer: John Mainzer, 8/15/15
- *
- *-------------------------------------------------------------------------
- */
-
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
-#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the tail of the LRU list. */ \
- \
- H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* insert the entry at the tail of the clean or dirty LRU list as \
- * appropriate. \
- */ \
- \
- if ( entry_ptr->is_dirty ) { \
- H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- } else { \
- H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
-}
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the tail of the LRU list. */ \
- \
- H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- } \
-}
-
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
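/* A minimal sketch (hypothetical types) of the policy the removed macro
 * documents: a regular insertion places the entry at the head of the LRU
 * list (most recently used), while an entry reconstructed from a cache image
 * is appended at the tail, i.e. treated as least recently used.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_dll_entry {
    struct sketch_dll_entry *next;
    struct sketch_dll_entry *prev;
};

struct sketch_dll_list {
    struct sketch_dll_entry *head;   /* most recently used end  */
    struct sketch_dll_entry *tail;   /* least recently used end */
};

static void
sketch_lru_insert(struct sketch_dll_list *lru, struct sketch_dll_entry *e,
                  bool from_cache_image)
{
    e->next = e->prev = NULL;

    if (lru->head == NULL) {                 /* empty list            */
        lru->head = lru->tail = e;
    } else if (from_cache_image) {           /* append: LRU position  */
        e->prev = lru->tail;
        lru->tail->next = e;
        lru->tail = e;
    } else {                                 /* prepend: MRU position */
        e->next = lru->head;
        lru->head->prev = e;
        lru->head = e;
    }
}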
-
-
-/*-------------------------------------------------------------------------
- *
* Macro: H5C__UPDATE_RP_FOR_INSERTION
*
* Purpose: Update the replacement policy data structures for an
@@ -2545,6 +2437,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
+ HDassert( (cache_ptr)->pel_len >= 0 ); \
\
} else { \
\
@@ -2607,6 +2500,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
+ HDassert( (cache_ptr)->pel_len >= 0 ); \
\
} else { \
\
@@ -2950,40 +2844,41 @@ if ( ( (cache_ptr)->index_size != \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
+ HDassert( (cache_ptr)->pel_len >= 0 ); \
\
- /* modified LRU specific code */ \
+ /* modified LRU specific code */ \
\
- /* insert the entry at the head of the LRU list. */ \
+ /* insert the entry at the head of the LRU list. */ \
\
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* Similarly, insert the entry at the head of either the clean \
- * or dirty LRU list as appropriate. \
- */ \
+ /* Similarly, insert the entry at the head of either the clean \
+ * or dirty LRU list as appropriate. \
+ */ \
\
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (fail_val)) \
\
- } else { \
+ } else { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (fail_val)) \
+ } \
\
- /* End modified LRU specific code. */ \
+ /* End modified LRU specific code. */ \
\
} /* H5C__UPDATE_RP_FOR_UNPIN */
@@ -3006,6 +2901,7 @@ if ( ( (cache_ptr)->index_size != \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
+ HDassert( (cache_ptr)->pel_len >= 0 ); \
\
/* modified LRU specific code */ \
\
@@ -3178,22 +3074,22 @@ if ( ( (hd_ptr) == NULL ) || \
( (len) <= 0 ) || \
( (Size) < (entry_ptr)->size ) || \
( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
- ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
( ( (len) == 1 ) && \
( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
- ( (entry_ptr)->coll_next == NULL ) && \
- ( (entry_ptr)->coll_prev == NULL ) && \
+ ( (entry_ptr)->coll_next == NULL ) && \
+ ( (entry_ptr)->coll_prev == NULL ) && \
( (Size) == (entry_ptr)->size ) \
) \
) \
) \
) { \
- HDassert(0 && "coll DLL pre remove SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
+ HDassert(0 && "coll DLL pre remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
}
-#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
@@ -3205,35 +3101,36 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL sanity check failed"); \
+ HDassert(0 && "COLL DLL sanity check failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \
}
#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->coll_next != NULL ) || \
- ( (entry_ptr)->coll_prev != NULL ) || \
+ ( (entry_ptr)->coll_next != NULL ) || \
+ ( (entry_ptr)->coll_prev != NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
+ ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL pre insert SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
+ HDassert(0 && "COLL DLL pre insert SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -3536,8 +3433,10 @@ typedef struct H5C_tag_info_t {
* types are stored in the type_name_table discussed below, and
* indexed by the ids.
*
- * class_table_ptr: Pointer to an array of H5C_class_t of length
- * max_type_id + 1. Entry classes for the cache.
+ * type_name_table_ptr: Pointer to an array of pointer to char of length
+ * max_type_id + 1. The strings pointed to by the entries
+ * in the array are the names of the entry types associated
+ * with the indexing type IDs.
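/* A minimal sketch of the field described above (entries are illustrative,
 * not the library's actual type names): a table of max_type_id + 1 name
 * strings indexed by entry type ID, in place of the table of H5C_class_t
 * pointers used on the other side of this change.
 */
static const char *const sketch_type_name_table[] = {
    "entry type 0",   /* one slot per type ID, 0 .. max_type_id */
    "entry type 1",
    "entry type 2"
};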
*
* max_cache_size: Nominal maximum number of bytes that may be stored in the
* cache. This value should be viewed as a soft limit, as the
@@ -4087,22 +3986,7 @@ typedef struct H5C_tag_info_t {
*
* size_decreased: Boolean flag set to TRUE whenever the maximum cache
* size is decreased. The flag triggers a call to
- * H5C__make_space_in_cache() on the next call to H5C_protect().
- *
- * resize_in_progress: As the metadata cache has become re-entrant, it is
- * possible that a protect may trigger a call to
- * H5C__auto_adjust_cache_size(), which may trigger a flush,
- * which may trigger a protect, which will result in another
- * call to H5C__auto_adjust_cache_size().
- *
- * The resize_in_progress boolean flag is used to detect this,
- * and to prevent the infinite recursion that would otherwise
- * occur.
- *
- * Note that this issue is not hypothetical -- this field
- * was added 12/29/15 to fix a bug exposed in the testing
- * of changes to the file driver info superblock extension
- * management code needed to support rings.
+ * H5C_make_space_in_cache() on the next call to H5C_protect().
*
* resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
* data for automatic cache resizing.
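/* A minimal sketch (hypothetical names) of the re-entrancy guard removed
 * above: a resize may trigger a flush, which may trigger a protect, which
 * would otherwise re-enter the resize logic; the flag breaks the recursion.
 */
#include <stdbool.h>

struct sketch_resize_state {
    bool resize_in_progress;
};

static void
sketch_auto_adjust_cache_size(struct sketch_resize_state *st)
{
    if (st->resize_in_progress)
        return;                       /* already resizing; do nothing */

    st->resize_in_progress = true;
    /* ... adjust the maximum cache size, possibly flushing or evicting
     * entries, which can indirectly lead back into this function ...
     */
    st->resize_in_progress = false;
}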
@@ -4179,77 +4063,6 @@ typedef struct H5C_tag_info_t {
* this field will be reset every automatic resize epoch.
*
*
- * Metadata cache image management related fields.
- *
- * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
- * data for generation of a cache image on file close.
- *
- * serialization_in_progress: Boolean field that is set to TRUE iff
- * the cache is in the process of being serialized. This
- * field is needed to support the H5C_serialization_in_progress()
- * call, which is in turn required for sanity checks in some
- * cache clients.
- *
- * load_image: Boolean flag indicating that the metadata cache image
- * superblock extension message exists and should be
- * read, and the image block read and decoded on the next
- * call to H5C_protect().
- *
- * image_loaded: Boolean flag indicating that the metadata cache has
- * loaded the metadata cache image as directed by the
- * MDC cache image superblock extension message.
- *
- * delete_image: Boolean flag indicating whether the metadata cache image
- * superblock message should be deleted and the cache image
- * file space freed after they have been read and decoded.
- *
- * This flag should be set to TRUE iff the file is opened
- * R/W and there is a cache image to be read.
- *
- * image_addr: haddr_t containing the base address of the on disk
- * metadata cache image, or HADDR_UNDEF if that value is
- * undefined. Note that this field is used both in the
- * construction and write, and the read and decode of
- * metadata cache image blocks.
- *
- * image_len: hsize_t containing the size of the on disk metadata cache
- * image, or zero if that value is undefined. Note that this
- * field is used both in the construction and write, and the
- * read and decode of metadata cache image blocks.
- *
- * image_data_len: size_t containing the number of bytes of data in the
- * on disk metadata cache image, or zero if that value is
- * undefined.
- *
- * In most cases, this value is the same as the image_len
- * above. It exists to allow for metadata cache image blocks
- * that are larger than the actual image. Thus in all
- * cases image_data_len <= image_len.
- *
- * To create the metadata cache image, we must first serialize all the
- * entries in the metadata cache. This is done by a scan of the index.
- * As entries must be serialized in increasing flush dependency height
- * order, we scan the index repeatedly, once for each flush dependency
- * height in increasing order.
- *
- * This operation is complicated by the fact that entries other than the
- * target may be inserted, loaded, relocated, or removed from the cache
- * (either by eviction or the take ownership flag) as the result of a
- * pre_serialize or serialize callback. While entry removals are not
- * a problem for the scan of the index, insertions, loads, and relocations
- * are. Hence the entries loaded, inserted, and relocated counters
- * listed below have been implemented to allow these conditions to be
- * detected and dealt with by restarting the scan.
- *
- * The serialization operation is further complicated by the fact that
- * the flush dependency height of a given entry may increase (as the
- * result of an entry load or insert) or decrease (as the result of an
- * entry removal -- via either eviction or the take ownership flag). The
- * entry_fd_height_change_counter field is maintained to allow detection
- * of this condition, and a restart of the scan when it occurs.
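/* A minimal sketch (hypothetical names) of the restart logic described
 * above: the index is scanned once per flush dependency height, and a pass
 * is repeated whenever the counters show that entries were loaded, inserted,
 * or relocated by a serialize callback during that pass.
 */
#include <stdint.h>

struct sketch_scan_counters {
    int64_t entries_loaded;
    int64_t entries_inserted;
    int64_t entries_relocated;
};

/* Stub standing in for one serialization pass over the index. */
static void
sketch_scan_index_once(struct sketch_scan_counters *c, unsigned height)
{
    (void)c;
    (void)height;
}

static void
sketch_serialize_by_fd_height(struct sketch_scan_counters *c,
                              unsigned max_fd_height)
{
    for (unsigned height = 0; height <= max_fd_height; height++) {
        struct sketch_scan_counters before;

        do {
            before = *c;
            sketch_scan_index_once(c, height); /* callbacks may bump counters */
        } while (before.entries_loaded    != c->entries_loaded   ||
                 before.entries_inserted  != c->entries_inserted ||
                 before.entries_relocated != c->entries_relocated);
    }
}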
- *
- * Note that all these new fields would work just as well as booleans.
- *
* entries_loaded_counter: Number of entries loaded into the cache
* since the last time this field was reset.
*
@@ -4259,29 +4072,6 @@ typedef struct H5C_tag_info_t {
* entries_relocated_counter: Number of entries whose base address has
* been changed since the last time this field was reset.
*
- * entry_fd_height_change_counter: Number of entries whose flush dependency
- * height has changed since the last time this field was reset.
- *
- * The following fields are used to assemble the cache image prior to
- * writing it to disk.
- *
- * num_entries_in_image: Unsigned integer field containing the number of entries
- * to be copied into the metadata cache image. Note that
- * this value will be less than the number of entries in
- * the cache, and the superblock and its related entries
- * are not written to the metadata cache image.
- *
- * image_entries: Pointer to a dynamically allocated array of instance of
- * H5C_image_entry_t of length num_entries_in_image, or NULL
- * if that array does not exist. This array is used to
- * assemble entry data to be included in the image, and to
- * sort them by flush dependency height and LRU rank.
- *
- * image_buffer: Pointer to the dynamically allocated buffer of length
- * image_len in which the metadata cache image is assembled,
- * or NULL if that buffer does not exist.
- *
- *
* Free Space Manager Related fields:
*
* The free space managers must be informed when we are about to close
@@ -4290,10 +4080,10 @@ typedef struct H5C_tag_info_t {
* page buffering, this is no longer viable, as we must finalize the on
* disk image of all metadata much sooner.
*
- * This is handled by the H5MF_settle_raw_data_fsm() and
- * H5MF_settle_meta_data_FSM() routines. As these calls are expensive,
+ * This is handled by the H5FS_settle_raw_data_fsm() and
+ * H5FS_settle_meta_data_fsm() routines. As these calls are expensive,
* the following fields are used to track whether the target free space
- * managers are clean.
+ * managers are clean.
*
* They are also used in sanity checking, as once a free space manager is
* settled, it should not become unsettled (i.e. be asked to allocate or
@@ -4310,7 +4100,7 @@ typedef struct H5C_tag_info_t {
* free space manager metadata.
*
* mdfsm_settled: Boolean flag indicating whether the meta data free space
- * manager is settled -- i.e. whether the correct space has
+ * manager is settled -- i.e. whether the correct space has
* been allocated for it in the file.
*
* Note that the name of this field is deceptive. In the
@@ -4495,63 +4285,23 @@ typedef struct H5C_tag_info_t {
* max_pel_size: Largest value attained by the pel_size field in the
* current epoch.
*
- * calls_to_msic: Total number of calls to H5C__make_space_in_cache
+ * calls_to_msic: Total number of calls to H5C_make_space_in_cache
*
* total_entries_skipped_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C__make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
*
* total_entries_scanned_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C__make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
*
* max_entries_skipped_in_msic: Maximum number of clean entries skipped
- * in any one call to H5C__make_space_in_cache().
+ * in any one call to H5C_make_space_in_cache().
*
* max_entries_scanned_in_msic: Maximum number of entries scanned over
- * in any one call to H5C__make_space_in_cache().
+ * in any one call to H5C_make_space_in_cache().
*
* entries_scanned_to_make_space: Number of entries scanned only when looking
* for entries to evict in order to make space in cache.
*
- *
- * The following fields track statistics on cache images.
- *
- * images_created: Integer field containing the number of cache images
- * created since the last time statistics were reset.
- *
- * At present, this field must always be either 0 or 1.
- * Further, since cache images are only created at file
- * close, this field should only be set at that time.
- *
- * images_loaded: Integer field containing the number of cache images
- * loaded since the last time statistics were reset.
- *
- * At present, this field must always be either 0 or 1.
- * Further, since cache images are only loaded at the
- * time of the first protect or on file close, this value
- * should only change on those events.
- *
- * last_image_size: Size of the most recently loaded metadata cache image
- * loaded into the cache, or zero if no image has been
- * loaded.
- *
- * At present, at most one cache image can be loaded into
- * the metadata cache for any given file, and this image
- * will be loaded either on the first protect, or on file
- * close if no entry is protected before then.
- *
- *
- * Fields for tracking prefetched entries. Note that flushes and evictions
- * of prefetched entries are tracked in the flushes and evictions arrays
- * discussed above.
- *
- * prefetches: Number of prefetched entries that are loaded to the
- * cache.
- *
- * dirty_prefetches: Number of dirty prefetched entries that are loaded
- * into the cache.
- *
- * prefetch_hits: Number of prefetched entries that are actually used.
- *
*
* As entries are now capable of moving, loading, dirtying, and deleting
* other entries in their pre_serialize and serialize callbacks, it has
@@ -4622,11 +4372,6 @@ typedef struct H5C_tag_info_t {
* field is intended to allow marking of output of with
* the processes mpi rank.
*
- * get_entry_ptr_from_addr_counter: Counter used to track the number of
- * times the H5C_get_entry_ptr_from_addr() function has been
- * called successfully. This field is only defined when
- * NDEBUG is not #defined.
- *
****************************************************************************/
struct H5C_t {
uint32_t magic;
@@ -4637,7 +4382,7 @@ struct H5C_t {
FILE * log_file_ptr;
void * aux_ptr;
int32_t max_type_id;
- const H5C_class_t * const *class_table_ptr;
+ const char * (* type_name_table_ptr);
size_t max_cache_size;
size_t min_clean_size;
H5C_write_permitted_func_t check_write_permitted;
@@ -4647,16 +4392,16 @@ struct H5C_t {
hbool_t close_warning_received;
/* Fields for maintaining [hash table] index of entries */
- uint32_t index_len;
+ int32_t index_len;
size_t index_size;
- uint32_t index_ring_len[H5C_RING_NTYPES];
+ int32_t index_ring_len[H5C_RING_NTYPES];
size_t index_ring_size[H5C_RING_NTYPES];
size_t clean_index_size;
size_t clean_index_ring_size[H5C_RING_NTYPES];
size_t dirty_index_size;
size_t dirty_index_ring_size[H5C_RING_NTYPES];
H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN];
- uint32_t il_len;
+ int32_t il_len;
size_t il_size;
H5C_cache_entry_t * il_head;
H5C_cache_entry_t * il_tail;
@@ -4668,15 +4413,15 @@ struct H5C_t {
/* Fields for maintaining list of in-order entries, for flushing */
hbool_t slist_changed;
- uint32_t slist_len;
+ int32_t slist_len;
size_t slist_size;
- uint32_t slist_ring_len[H5C_RING_NTYPES];
+ int32_t slist_ring_len[H5C_RING_NTYPES];
size_t slist_ring_size[H5C_RING_NTYPES];
H5SL_t * slist_ptr;
- uint32_t num_last_entries;
+ int32_t num_last_entries;
#if H5C_DO_SANITY_CHECKS
- int32_t slist_len_increase;
- ssize_t slist_size_increase;
+ int64_t slist_len_increase;
+ int64_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
/* Fields for maintaining list of tagged entries */
@@ -4684,38 +4429,38 @@ struct H5C_t {
hbool_t ignore_tags;
/* Fields for tracking protected entries */
- uint32_t pl_len;
+ int32_t pl_len;
size_t pl_size;
H5C_cache_entry_t * pl_head_ptr;
H5C_cache_entry_t * pl_tail_ptr;
/* Fields for tracking pinned entries */
- uint32_t pel_len;
+ int32_t pel_len;
size_t pel_size;
H5C_cache_entry_t * pel_head_ptr;
H5C_cache_entry_t * pel_tail_ptr;
/* Fields for complete LRU list of entries */
- uint32_t LRU_list_len;
+ int32_t LRU_list_len;
size_t LRU_list_size;
H5C_cache_entry_t * LRU_head_ptr;
H5C_cache_entry_t * LRU_tail_ptr;
/* Fields for clean LRU list of entries */
- uint32_t cLRU_list_len;
+ int32_t cLRU_list_len;
size_t cLRU_list_size;
H5C_cache_entry_t * cLRU_head_ptr;
H5C_cache_entry_t * cLRU_tail_ptr;
/* Fields for dirty LRU list of entries */
- uint32_t dLRU_list_len;
+ int32_t dLRU_list_len;
size_t dLRU_list_size;
H5C_cache_entry_t * dLRU_head_ptr;
H5C_cache_entry_t * dLRU_tail_ptr;
#ifdef H5_HAVE_PARALLEL
/* Fields for collective metadata reads */
- uint32_t coll_list_len;
+ int32_t coll_list_len;
size_t coll_list_size;
H5C_cache_entry_t * coll_head_ptr;
H5C_cache_entry_t * coll_tail_ptr;
@@ -4732,7 +4477,6 @@ struct H5C_t {
hbool_t resize_enabled;
hbool_t cache_full;
hbool_t size_decreased;
- hbool_t resize_in_progress;
H5C_auto_size_ctl_t resize_ctl;
/* Fields for epoch markers used in automatic cache size adjustment */
@@ -4748,23 +4492,9 @@ struct H5C_t {
int64_t cache_hits;
int64_t cache_accesses;
- /* fields supporting generation of a cache image on file close */
- H5C_cache_image_ctl_t image_ctl;
- hbool_t serialization_in_progress;
- hbool_t load_image;
- hbool_t image_loaded;
- hbool_t delete_image;
- haddr_t image_addr;
- hsize_t image_len;
- hsize_t image_data_len;
int64_t entries_loaded_counter;
int64_t entries_inserted_counter;
int64_t entries_relocated_counter;
- int64_t entry_fd_height_change_counter;
- uint32_t num_entries_in_image;
- H5C_image_entry_t * image_entries;
- void * image_buffer;
-
/* Free Space Manager Related fields */
hbool_t rdfsm_settled;
hbool_t mdfsm_settled;
@@ -4802,21 +4532,21 @@ struct H5C_t {
int64_t total_successful_ht_search_depth;
int64_t failed_ht_searches;
int64_t total_failed_ht_search_depth;
- uint32_t max_index_len;
+ int32_t max_index_len;
size_t max_index_size;
size_t max_clean_index_size;
size_t max_dirty_index_size;
/* Fields for in-order skip list */
- uint32_t max_slist_len;
+ int32_t max_slist_len;
size_t max_slist_size;
/* Fields for protected entry list */
- uint32_t max_pl_len;
+ int32_t max_pl_len;
size_t max_pl_size;
/* Fields for pinned entry list */
- uint32_t max_pel_len;
+ int32_t max_pel_len;
size_t max_pel_size;
/* Fields for tracking 'make space in cache' (msic) operations */
@@ -4832,16 +4562,6 @@ struct H5C_t {
int64_t LRU_scan_restarts;
int64_t index_scan_restarts;
- /* Fields for tracking cache image operations */
- int32_t images_created;
- int32_t images_loaded;
- hsize_t last_image_size;
-
- /* Fields for tracking prefetched entries */
- int64_t prefetches;
- int64_t dirty_prefetches;
- int64_t prefetch_hits;
-
#if H5C_COLLECT_CACHE_ENTRY_STATS
int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
@@ -4853,10 +4573,6 @@ struct H5C_t {
#endif /* H5C_COLLECT_CACHE_STATS */
char prefix[H5C__PREFIX_LEN];
-
-#ifndef NDEBUG
- int64_t get_entry_ptr_from_addr_counter;
-#endif /* NDEBUG */
};
/* Define typedef for tagged cache entry iteration callbacks */
@@ -4867,28 +4583,20 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
/* Package Private Variables */
/*****************************/
+/* Metadata cache epoch class */
+H5_DLLVAR const H5C_class_t H5C__epoch_marker_class;
+
/******************************/
/* Package Private Prototypes */
/******************************/
-H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id,
- H5C_t * cache_ptr, H5C_cache_entry_t** entry_ptr_ptr,
- const H5C_class_t * type, haddr_t addr, void * udata);
/* General routines */
H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id,
H5C_cache_entry_t *entry_ptr, unsigned flags);
-H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr);
-H5_DLL herr_t H5C__load_cache_image(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
-H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, hid_t dxpl_id,
- size_t space_needed, hbool_t write_permitted);
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
-H5_DLL herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr,
- H5C_cache_entry_t *entry_ptr, hid_t dxpl_id);
-H5_DLL herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
H5C_tag_iter_cb_t cb, void *cb_ctx);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 28eacf2..654ce35 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -114,7 +114,6 @@
/* Cache configuration versions */
#define H5C__CURR_AUTO_SIZE_CTL_VER 1
#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
-#define H5C__CURR_CACHE_IMAGE_CTL_VER 1
/* Default configuration settings */
#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
@@ -995,16 +994,16 @@ typedef int H5C_ring_t;
* just before the entry is freed.
*
* This is necessary, as the LRU list can be changed out
- * from under H5C__make_space_in_cache() by the serialize
+ * from under H5C_make_space_in_cache() by the serialize
* callback which may change the size of an existing entry,
* and/or load a new entry while serializing the target entry.
*
* This in turn can cause a recursive call to
- * H5C__make_space_in_cache() which may either flush or evict
+ * H5C_make_space_in_cache() which may either flush or evict
* the next entry that the first invocation of that function
* was about to examine.
*
- * The magic field allows H5C__make_space_in_cache() to
+ * The magic field allows H5C_make_space_in_cache() to
* detect this case, and re-start its scan from the bottom
* of the LRU when this situation occurs.
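/* A minimal sketch (hypothetical names) of the restart-on-magic-change idea
 * described above: while evicting from the LRU tail, the next candidate is
 * remembered, and if its magic is no longer valid after the current entry is
 * processed, the scan restarts from the bottom of the LRU.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_GOOD_MAGIC 0x1A2B3C4Du
#define SKETCH_BAD_MAGIC  0xDEADBEEFu

struct sketch_lru_entry {
    uint32_t magic;
    size_t   size;
    struct sketch_lru_entry *prev;   /* toward the head of the LRU */
};

struct sketch_lru_cache {
    struct sketch_lru_entry *lru_tail;
};

/* Stub: flush or evict one entry.  In the real cache a serialize callback
 * run from here may change the LRU arbitrarily; the stub only invalidates
 * the entry's magic to mark it as no longer usable.
 */
static size_t
sketch_flush_or_evict(struct sketch_lru_cache *cache,
                      struct sketch_lru_entry *entry)
{
    (void)cache;
    entry->magic = SKETCH_BAD_MAGIC;
    return entry->size;
}

static void
sketch_make_space(struct sketch_lru_cache *cache, size_t space_needed)
{
    struct sketch_lru_entry *entry = cache->lru_tail;

    while (space_needed > 0 && entry != NULL) {
        struct sketch_lru_entry *next = entry->prev;  /* remember candidate */
        size_t freed = sketch_flush_or_evict(cache, entry);

        space_needed = (freed < space_needed) ? (space_needed - freed) : 0;

        /* Restart from the bottom of the LRU if the remembered entry is no
         * longer valid, as the comment above describes.
         */
        if (next != NULL && next->magic != SKETCH_GOOD_MAGIC)
            entry = cache->lru_tail;
        else
            entry = next;
    }
}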
*
@@ -1339,187 +1338,6 @@ typedef int H5C_ring_t;
* In either case, when there is no previous item, it should
* be NULL.
*
- * Fields supporting the cache image feature:
- *
- * The following fields are used to store data about the entry which must
- * be stored in the cache image block, but which will typically be either
- * lost or heavily altered in the process of serializing the cache and
- * preparing its contents to be copied into the cache image block.
- *
- * Some fields are also used in loading the contents of the metadata cache
- * image back into the cache, and in managing such entries until they are
- * either protected by the library (at which point they become regular
- * entries) or are evicted. See discussion of the prefetched field for
- * further details.
- *
- * include_in_image: Boolean flag indicating whether this entry should
- * be included in the metadata cache image. This field should
- * always be false prior to the H5C_prep_for_file_close() call.
- * During that call, it should be set to TRUE for all entries
- * that are to be included in the metadata cache image. At
- * present, only the superblock, the superblock extension
- * object header and its chunks (if any) are omitted from
- * the image.
- *
- * lru_rank: Rank of the entry in the LRU just prior to file close.
- *
- * Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
- * the process of flushing the cache.
- *
- * image_dirty: Boolean flag indicating whether the entry should be marked
- * as dirty in the metadata cache image. The flag is set to
- * TRUE iff the entry is dirty when H5C_prep_for_file_close()
- * is called.
- *
- * fd_parent_count: If the entry is a child in one or more flush dependency
- * relationships, this field contains the number of flush
- * dependency parents.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any parents
- * that are not in the image are removed from this count and
- * from the fd_parent_addrs array below.
- *
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
- *
- * fd_parent_addrs: If the entry is a child in one or more flush dependency
- * relationship when H5C_prep_for_file_close() is called, this
- * field must contain a pointer to an array of size
- * fd_parent_count containing the on disk addresses of the
- * parent.
- *
- * In all other cases, the field is set to NULL.
- *
- * Note that while this list of addresses is initially taken
- * from the flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any parents
- * that are not in the image are removed from this list, and
- * and from the fd_parent_count above.
- *
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
- *
- * fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
- * dependency children.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any children
- * that are not in the image are removed from this count.
- *
- * fd_dirty_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of dirty flush
- * dependency children.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any dirty
- * children that are not in the image are removed from this
- * count.
- *
- * image_fd_height: Flush dependency height of the entry in the cache image.
- *
- * The flush dependency height of any entry involved in a
- * flush dependency relationship is defined to be the
- * longest flush dependency path from that entry to an entry
- * with no flush dependency children.
- *
- * Since the image_fd_height is used to order entries in the
- * cache image so that fd parents precede fd children, for
- * purposes of this field, an entry is at flush dependency
- * level 0 if it either has no children, or if all of its
- * children are not in the cache image.
- *
- * Note that if a child in a flush dependency relationship is
- * dirty and in the cache image, and its parent is dirty and
- * not in the cache image, then the child must be excluded
- * from the cache image to maintain flush ordering.
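/* A minimal sketch (hypothetical types) of the definition above: an entry's
 * flush dependency height is 0 if it has no children in the image, and
 * otherwise one more than the greatest height among its in-image children.
 */
#include <stdbool.h>

struct sketch_fd_node {
    bool in_image;
    unsigned n_children;
    struct sketch_fd_node **children;
};

static unsigned
sketch_fd_height(const struct sketch_fd_node *node)
{
    unsigned height = 0;

    for (unsigned u = 0; u < node->n_children; u++) {
        if (node->children[u]->in_image) {
            unsigned h = sketch_fd_height(node->children[u]) + 1;

            if (h > height)
                height = h;
        }
    }

    return height;   /* 0 when there are no in-image children */
}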
- *
- * prefetched: Boolean flag indicating that the on disk image of the entry
- * has been loaded into the cache prior any request for the
- * entry by the rest of the library.
- *
- * As of this writing (8/10/15), this can only happen through
- * the load of a cache image block, although other scenarios
- * are contemplated for the use of this feature. Note that
- * unlike the usual prefetch situation, this means that a
- * prefetched entry can be dirty, and/or can be a party to
- * flush dependency relationship(s). This complicates matters
- * somewhat.
- *
- * The essential feature of a pre-fetched entry is that it
- * consists only of a buffer containing the on disk image of
- * the entry. Thus it must be deserialized before it can
- * be passed back to the library on a protect call. This
- * task is handled by H5C_deserialized_prefetched_entry().
- * In essence, this routine calls the deserialize callback
- * provided in the protect call with the on disk image,
- * deletes the prefetched entry from the cache, and replaces
- * it with the deserialized entry returned by the deserialize
- * callback.
- *
- * Further, if the prefetched entry is a flush dependency parent,
- * all its flush dependency children (which must also be
- * pre-fetched entries), must be transferred to the new cache
- * entry returned by the deserialization callback.
- *
- * Finally, if the prefetched entry is a flush dependency child,
- * this flush dependency must be destroyed prior to the
- * deserialize call.
- *
- * In addition to the above special processing on the first
- * protect call on a prefetched entry (after which is no longer
- * a prefetched entry), prefetched entries also require special
- * treatment on flush and evict.
- *
- * On flush, a dirty prefetched entry must simply be written
- * to disk and marked clean without any call to any client
- * callback.
- *
- * On eviction, if a prefetched entry is a flush dependency
- * child, that flush dependency relationship must be destroyed
- * just prior to the eviction. If the flush dependency code
- * is working properly, it should be impossible for any entry
- * that is a flush dependency parent to be evicted.
- *
- * prefetch_type_id: Integer field containing the type ID of the prefetched
- * entry. This ID must match the ID of the type provided in any
- * protect call on the prefetched entry.
- *
- * The value of this field is undefined if prefetched is FALSE.
- *
- * age: Number of times a prefetched entry has appeared in
- * subsequent cache images. The field exists to allow
- * imposition of a limit on how many times a prefetched
- * entry can appear in subsequent cache images without being
- * converted to a regular entry.
- *
- * This field must be zero if prefetched is FALSE.
- *
- * serialization_count: Integer field used to maintain a count of the
- * number of times each entry is serialized during cache
- * serialization. While no entry should be serialized more than
- * once in any serialization call, throw an assertion if any
- * flush dependency parent is serialized more than once during
- * a single cache serialization.
- *
- * This is a debugging field, and thus is maintained only if
- * NDEBUG is undefined.
*
* Fields supporting tagged entries:
*
@@ -1615,23 +1433,6 @@ typedef struct H5C_cache_entry_t {
struct H5C_cache_entry_t *coll_prev;
#endif /* H5_HAVE_PARALLEL */
- /* fields supporting cache image */
- hbool_t include_in_image;
- int32_t lru_rank;
- hbool_t image_dirty;
- uint64_t fd_parent_count;
- haddr_t *fd_parent_addrs;
- uint64_t fd_child_count;
- uint64_t fd_dirty_child_count;
- uint32_t image_fd_height;
- hbool_t prefetched;
- int prefetch_type_id;
- int32_t age;
-
-#ifndef NDEBUG /* debugging field */
- int serialization_count;
-#endif /* NDEBUG */
-
/* fields supporting tag lists */
struct H5C_cache_entry_t *tl_next;
struct H5C_cache_entry_t *tl_prev;
@@ -1646,168 +1447,6 @@ typedef struct H5C_cache_entry_t {
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
} H5C_cache_entry_t;
-
-/****************************************************************************
- *
- * structure H5C_image_entry_t
- *
- * Instances of the H5C_image_entry_t structure are used to store data on
- * metadata cache entries used in the construction of the metadata cache
- * image block. In essence this structure is a greatly simplified version
- * of H5C_cache_entry_t.
- *
- * The fields of this structure are discussed individually below:
- *
- * JRM - 8/5/15
- *
- * magic: Unsigned 32 bit integer that must always be set to
- * H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid.
- * The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC
- * just before the entry is freed.
- *
- * addr: Base address of the cache entry on disk.
- *
- * size: Length of the cache entry on disk in bytes.
- *
- * ring: Instance of H5C_ring_t indicating the flush ordering ring
- * to which this entry is assigned.
- *
- * age: Number of times this prefetched entry has appeared in
- * the current sequence of cache images. This field is
- * initialized to 0 if the instance of H5C_image_entry_t
- * is constructed from a regular entry.
- *
- * If the instance is constructed from a prefetched entry
- * currently residing in the metadata cache, the field is
- * set to 1 + the age of the prefetched entry, or to
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX.
- *
- * type_id: Integer field containing the type ID of the entry.
- *
- * lru_rank: Rank of the entry in the LRU just prior to file close.
- *
- * Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
- * the process of flushing the cache).
- *
- * is_dirty: Boolean flag indicating whether the contents of the cache
- * entry has been modified since the last time it was written
- * to disk as a regular piece of metadata.
- *
- * image_fd_height: Flush dependency height of the entry in the cache image.
- *
- * The flush dependency height of any entry involved in a
- * flush dependency relationship is defined to be the
- * longest flush dependency path from that entry to an entry
- * with no flush dependency children.
- *
- * Since the image_fd_height is used to order entries in the
- * cache image so that fd parents precede fd children, for
- * purposes of this field, an entry is at flush dependency
- * level 0 if it either has no children, or if all of its
- * children are not in the cache image.
- *
- * Note that if a child in a flush dependency relationship is
- * dirty and in the cache image, and its parent is dirty and
- * not in the cache image, then the child must be excluded
- * from the cache image to maintain flush ordering.
- *
- * fd_parent_count: If the entry is a child in one or more flush dependency
- * relationships, this field contains the number of flush
- * dependency parents.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are
- * not in the image are removed from this count and
- * from the fd_parent_addrs array below.
- *
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
- * the instance of H5C_image_entry_t.
- *
- * fd_parent_addrs: If the entry is a child in one or more flush dependency
- * relationship when H5C_prep_for_file_close() is called, this
- * field must contain a pointer to an array of size
- * fd_parent_count containing the on disk addresses of the
- * parents.
- *
- * In all other cases, the field is set to NULL.
- *
- * Note that while this list of addresses is initially taken
- * from the flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are not
- * in the image are removed from this list, and from the
- * fd_parent_count above.
- *
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
- * the instance of H5C_image_entry_t.
- *
- * fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
- * dependency children.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any children
- * that are not in the image are removed from this count.
- *
- * fd_dirty_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of dirty flush
- * dependency children.
- *
- * In all other cases, the field is set to zero.
- *
- * Note that while this count is initially taken from the
- * flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any dirty children
- * that are not in the image are removed from this count.
- *
- * image_ptr: Pointer to void. When not NULL, this field points to a
- * dynamically allocated block of size bytes in which the
- * on disk image of the metadata cache entry is stored.
- *
- * If the entry is dirty, the pre-serialize and serialize
- * callbacks must be used to update this image before it is
- * written to disk
- *
- *
- ****************************************************************************/
-
-typedef struct H5C_image_entry_t {
- uint32_t magic;
- haddr_t addr;
- size_t size;
- H5C_ring_t ring;
- int32_t age;
- int32_t type_id;
- int32_t lru_rank;
- hbool_t is_dirty;
- unsigned image_fd_height;
- uint64_t fd_parent_count;
- haddr_t *fd_parent_addrs;
- uint64_t fd_child_count;
- uint64_t fd_dirty_child_count;
- void *image_ptr;
-} H5C_image_entry_t;
-
/****************************************************************************
*
* structure H5C_auto_size_ctl_t
@@ -2097,98 +1736,12 @@ typedef struct H5C_auto_size_ctl_t {
double empty_reserve;
} H5C_auto_size_ctl_t;
-/****************************************************************************
- *
- * structure H5C_cache_image_ctl_t
- *
- * Instances of H5C_image_ctl_t are used to get and set the control
- * fields for generation of a metadata cache image on file close.
- *
- * At present control of construction of a cache image is via a FAPL
- * property at file open / create.
- *
- * The fields of the structure are discussed individually below:
- *
- * version: Integer field containing the version number of this version
- * of the H5C_image_ctl_t structure. Any instance of
- * H5C_image_ctl_t passed to the cache must have a known
- * version number, or an error will be flagged.
- *
- * generate_image: Boolean flag indicating whether a cache image should
- * be created on file close.
- *
- * save_resize_status: Boolean flag indicating whether the cache image
- * should include the adaptive cache resize configuration and status.
- * Note that this field is ignored at present.
- *
- * entry_ageout: Integer field indicating the maximum number of
- * times a prefetched entry can appear in subsequent cache images.
- * This field exists to allow the user to avoid the buildup of
- * infrequently used entries in long sequences of cache images.
- *
- * The value of this field must lie in the range
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100).
- *
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
- * is imposed on the number of times a prefetched entry can appear
- * in subsequent cache images.
- *
- * A value of 0 prevents prefetched entries from being included
- * in cache images.
- *
- * Positive integers restrict prefetched entries to the specified
- * number of appearances.
- *
- * Note that the number of subsequent cache images that a prefetched
- * entry has appeared in is tracked in an 8 bit field. Thus, while
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
- * current value, any value in excess of 255 will be the functional
- * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
- *
- * flags: Unsigned integer containing flags controlling which aspects of the
- * cache image functionality is actually executed. The primary impetus
- * behind this field is to allow development of tests for partial
- * implementations that will require little if any modification to run
- * with the full implementation. In normal operation, all flags should
- * be set.
- *
- ****************************************************************************/
-
-#define H5C_CI__GEN_MDCI_SBE_MESG ((unsigned)0x0001)
-#define H5C_CI__GEN_MDC_IMAGE_BLK ((unsigned)0x0002)
-#define H5C_CI__SUPRESS_ENTRY_WRITES ((unsigned)0x0004)
-#define H5C_CI__WRITE_CACHE_IMAGE ((unsigned)0x0008)
-
-/* This #define must set all defined H5C_CI flags. It is
- * used in the default value for instances of H5C_cache_image_ctl_t.
- * This value will only be modified in test code.
- */
-#define H5C_CI__ALL_FLAGS ((unsigned)0x000F)
-
-#define H5C__DEFAULT_CACHE_IMAGE_CTL \
-{ \
- /* version = */ H5C__CURR_CACHE_IMAGE_CTL_VER, \
- /* generate_image = */ FALSE, \
- /* save_resize_status = */ FALSE, \
- /* entry_ageout = */ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE, \
- /* flags = */ H5C_CI__ALL_FLAGS \
-}
-
-typedef struct H5C_cache_image_ctl_t {
- int32_t version;
- hbool_t generate_image;
- hbool_t save_resize_status;
- int32_t entry_ageout;
- unsigned flags;
-} H5C_cache_image_ctl_t;
-
/***************************************/
/* Library-private Function Prototypes */
/***************************************/
H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size,
- int max_type_id, const H5C_class_t * const *class_table_ptr,
+ int max_type_id, const char *(*type_name_table_ptr),
H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
H5C_log_flush_func_t log_flush, void *aux_ptr);
H5_DLL herr_t H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[], hbool_t start_immediately);
@@ -2217,11 +1770,9 @@ herr_t H5C_verify_tag(int id, haddr_t tag);
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr,
H5C_auto_size_ctl_t *config_ptr);
-H5_DLL herr_t H5C_get_cache_image_config(const H5C_t * cache_ptr,
- H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr,
size_t *min_clean_size_ptr, size_t *cur_size_ptr,
- uint32_t *cur_num_entries_ptr);
+ int32_t *cur_num_entries_ptr);
H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr,
size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr,
@@ -2232,11 +1783,8 @@ H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictio
H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr);
-H5_DLL herr_t H5C_image_stats(H5C_t * cache_ptr, hbool_t print_header);
H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
haddr_t addr, void *thing, unsigned int flags);
-H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
- hsize_t len, hbool_t rw);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
H5_DLL herr_t H5C_mark_entry_clean(void *thing);
H5_DLL herr_t H5C_mark_entry_unserialized(void *thing);
@@ -2251,8 +1799,6 @@ H5_DLL void * H5C_protect(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr);
H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
-H5_DLL herr_t H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
- H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);
@@ -2264,7 +1810,6 @@ H5_DLL herr_t H5C_unpin_entry(void *thing);
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *thing,
unsigned int flags);
-H5_DLL herr_t H5C_validate_cache_image_config(H5C_cache_image_ctl_t * ctl_ptr);
H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
unsigned int tests);
H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
@@ -2274,32 +1819,20 @@ H5_DLL herr_t H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hboo
H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring);
H5_DLL herr_t H5C_unsettle_entry_ring(void *thing);
H5_DLL herr_t H5C_remove_entry(void *thing);
-H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr,
- hbool_t *write_ci_ptr);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id,
- H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr,
+ H5C_t *cache_ptr, int num_candidates, haddr_t *candidates_list_ptr,
int mpi_rank, int mpi_size);
H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
H5_DLL herr_t H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial);
-H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, unsigned ce_array_len,
+H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_array_len,
haddr_t *ce_array_ptr);
#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
-H5_DLL hbool_t H5C_get_serialization_in_progress(const H5C_t *cache_ptr);
H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
-H5_DLL herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
-H5_DLL herr_t H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr,
- void **entry_ptr_ptr);
-H5_DLL herr_t H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr,
- haddr_t child_addr, hbool_t *fd_exists_ptr);
-H5_DLL herr_t H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr,
- const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
- hbool_t *type_ok_ptr);
-H5_DLL herr_t H5C_validate_index_list(H5C_t *cache_ptr);
#endif /* NDEBUG */
#endif /* !_H5Cprivate_H */
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index 33a322d..f5409f7 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -131,7 +131,7 @@ H5C_get_cache_size(H5C_t * cache_ptr,
size_t * max_size_ptr,
size_t * min_clean_size_ptr,
size_t * cur_size_ptr,
- uint32_t * cur_num_entries_ptr)
+ int32_t * cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -444,8 +444,7 @@ H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
/* Locate the entry at the address */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't find entry in index")
+ HDassert(entry_ptr);
/* Return the ring value */
*ring = entry_ptr->ring;
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index 7540ff2..093403c 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -378,6 +378,21 @@ typedef struct H5EA__ctx_cb_t {
/* Package Private Variables */
/*****************************/
+/* H5EA header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_HDR[1];
+
+/* H5EA index block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_IBLOCK[1];
+
+/* H5EA super block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_SBLOCK[1];
+
+/* H5EA data block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLOCK[1];
+
+/* H5EA data block page inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1];
+
/* Internal extensible array testing class */
H5_DLLVAR const H5EA_class_t H5EA_CLS_TEST[1];
diff --git a/src/H5F.c b/src/H5F.c
index 5fd3a7d..a43009b 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -445,8 +445,6 @@ done:
hid_t
H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
{
- hbool_t ci_load = FALSE; /* whether MDC ci load requested */
- hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *new_file = NULL; /*file struct for new file */
hid_t dxpl_id = H5AC_ind_read_dxpl_id; /*dxpl used by library */
hid_t ret_value; /*return value */
@@ -492,12 +490,6 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
if(NULL == (new_file = H5F_open(filename, flags, fcpl_id, fapl_id, dxpl_id)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to create file")
- /* Check to see if both SWMR and cache image are requested. Fail if so */
- if(H5C_cache_image_status(new_file, &ci_load, &ci_write) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
- if((ci_load || ci_write) && (flags & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)))
- HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and cache image")
-
/* Get an atom for the file */
if((ret_value = H5I_register(H5I_FILE, new_file, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file")
@@ -506,8 +498,9 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
new_file->file_id = ret_value;
done:
- if(ret_value < 0 && new_file && H5F_try_close(new_file, NULL) < 0)
- HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
+ if(ret_value < 0 && new_file)
+ if(H5F_close(new_file) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
FUNC_LEAVE_API(ret_value)
} /* end H5Fcreate() */
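The lines removed above enforce that SWMR access and a metadata cache image cannot be requested for the same file. Below is a hedged sketch of the combination that the check rejects; it assumes the public cache image FAPL call H5Pset_mdc_image_config() and the H5AC_cache_image_config_t type from the cache image feature, neither of which is part of this patch, and the file name is a placeholder.

#include "hdf5.h"

/* Illustration only: enable a cache image on the FAPL and then request
 * SWMR write access at create time -- with the check above in place,
 * H5Fcreate() is expected to fail with "can't have both SWMR and cache image".
 */
int
main(void)
{
    H5AC_cache_image_config_t img_cfg = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
                                         1 /* generate_image */, 0 /* save_resize_status */,
                                         H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id;

    H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); /* SWMR needs the latest format */
    H5Pset_mdc_image_config(fapl_id, &img_cfg);                          /* request a cache image         */

    file_id = H5Fcreate("swmr_plus_ci.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
                        H5P_DEFAULT, fapl_id);
    if(file_id >= 0)
        H5Fclose(file_id);
    H5Pclose(fapl_id);
    return 0;
}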
@@ -556,8 +549,6 @@ done:
hid_t
H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
{
- hbool_t ci_load = FALSE; /* whether MDC ci load requested */
- hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *new_file = NULL; /*file struct for new file */
hid_t dxpl_id = H5AC_ind_read_dxpl_id; /*dxpl used by library */
hid_t ret_value; /*return value */
@@ -587,12 +578,6 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
if(NULL == (new_file = H5F_open(filename, flags, H5P_FILE_CREATE_DEFAULT, fapl_id, dxpl_id)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to open file")
- /* Check to see if both SWMR and cache image are requested. Fail if so */
- if(H5C_cache_image_status(new_file, &ci_load, &ci_write) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
- if((ci_load || ci_write) && (flags & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)))
- HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and cache image")
-
/* Get an atom for the file */
if((ret_value = H5I_register(H5I_FILE, new_file, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file handle")
@@ -1192,7 +1177,7 @@ H5Fget_mdc_size(hid_t file_id, size_t *max_size_ptr, size_t *min_clean_size_ptr,
size_t *cur_size_ptr, int *cur_num_entries_ptr)
{
H5F_t *file; /* File object for file ID */
- uint32_t cur_num_entries;
+ int32_t cur_num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
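The hunk above only switches the internal entry counter from uint32_t back to int32_t; the public signature shown in the hunk header keeps a plain int for the entry count. A short usage sketch of that public call follows; the file name is a placeholder.

#include <stdio.h>
#include "hdf5.h"

/* Query the metadata cache geometry of an open file through the public
 * routine whose internals are touched above.  "example.h5" is hypothetical.
 */
int
main(void)
{
    hid_t  file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    size_t max_size, min_clean_size, cur_size;
    int    cur_num_entries;

    if(file_id < 0)
        return 1;
    if(H5Fget_mdc_size(file_id, &max_size, &min_clean_size, &cur_size, &cur_num_entries) >= 0)
        printf("MDC: max=%zu  min_clean=%zu  cur=%zu  entries=%d\n",
               max_size, min_clean_size, cur_size, cur_num_entries);
    H5Fclose(file_id);
    return 0;
}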
@@ -1608,8 +1593,6 @@ done:
herr_t
H5Fstart_swmr_write(hid_t file_id)
{
- hbool_t ci_load = FALSE; /* whether MDC ci load requested */
- hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *file = NULL; /* File info */
size_t grp_dset_count=0; /* # of open objects: groups & datasets */
size_t nt_attr_count=0; /* # of opened named datatypes + opened attributes */
@@ -1643,12 +1626,6 @@ H5Fstart_swmr_write(hid_t file_id)
HDassert(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS);
- /* Check to see if cache image is enabled. Fail if so */
- if(H5C_cache_image_status(file, &ci_load, &ci_write) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
- if(ci_load || ci_write )
- HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and MDC cache image")
-
/* Flush data buffers */
if(H5F_flush(file, H5AC_ind_read_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
@@ -1891,48 +1868,7 @@ H5Fget_mdc_logging_status(hid_t file_id, hbool_t *is_enabled,
done:
FUNC_LEAVE_API(ret_value)
-} /* H5Fget_mdc_logging_status() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5Fset_latest_format
- *
- * Purpose: Enable switching the "latest format" flag while a file is open.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * Monday, September 21, 2015
- *-------------------------------------------------------------------------
- */
-herr_t
-H5Fset_latest_format(hid_t file_id, hbool_t latest_format)
-{
- H5F_t *f; /* File */
- unsigned latest_flags; /* Latest format flags for file */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_API(FAIL)
- H5TRACE2("e", "ib", file_id, latest_format);
-
- /* Check args */
- if(NULL == (f = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "not a file ID")
-
- /* Check if the value is changing */
- latest_flags = H5F_USE_LATEST_FLAGS(f, H5F_LATEST_ALL_FLAGS);
- if(latest_format != (H5F_LATEST_ALL_FLAGS == latest_flags)) {
- /* Call the flush routine, for this file */
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
-
- /* Toggle the 'latest format' flag */
- H5F_SET_LATEST_FLAGS(f, latest_format ? H5F_LATEST_ALL_FLAGS : 0);
- } /* end if */
-
-done:
- FUNC_LEAVE_API(ret_value)
-} /* end H5Fset_latest_format() */
+} /* H5Fstop_mdc_logging() */
/*-------------------------------------------------------------------------
diff --git a/src/H5FApkg.h b/src/H5FApkg.h
index 63eacff..ccef562 100644
--- a/src/H5FApkg.h
+++ b/src/H5FApkg.h
@@ -249,6 +249,15 @@ typedef struct H5FA_dblk_page_cache_ud_t {
/* Package Private Variables */
/*****************************/
+/* H5FA header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_HDR[1];
+
+/* H5FA data block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLOCK[1];
+
+/* H5FA data block page inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1];
+
/* Internal fixed array testing class */
H5_DLLVAR const H5FA_class_t H5FA_CLS_TEST[1];
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 7e12869..befcaca 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -1151,18 +1151,14 @@ H5FD_multi_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
ALL_MEMBERS(mt) {
out_mt = mt;
- if(f1->memb[mt] && f2->memb[mt])
- break;
- if(!cmp) {
- if(f1->memb[mt])
- cmp = -1;
- else if(f2->memb[mt])
- cmp = 1;
+ if (f1->memb[mt] && f2->memb[mt]) break;
+ if (!cmp) {
+ if (f1->memb[mt]) cmp = -1;
+ else if (f2->memb[mt]) cmp = 1;
}
} END_MEMBERS;
assert(cmp || out_mt<H5FD_MEM_NTYPES);
- if(out_mt>=H5FD_MEM_NTYPES)
- return cmp;
+ if (out_mt>=H5FD_MEM_NTYPES) return cmp;
return H5FDcmp(f1->memb[out_mt], f2->memb[out_mt]);
}
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index 4411236..f07ffad 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -199,6 +199,12 @@ struct H5FS_t {
/* Package Private Variables */
/*****************************/
+/* H5FS header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FSPACE_HDR[1];
+
+/* H5FS section info inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FSPACE_SINFO[1];
+
/* Declare a free list to manage the H5FS_node_t struct */
H5FL_EXTERN(H5FS_node_t);
diff --git a/src/H5Fint.c b/src/H5Fint.c
index d122357..8ad97a8 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -188,8 +188,6 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
if(H5P_set(new_plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set collective metadata read flag")
#endif /* H5_HAVE_PARALLEL */
- if(H5P_set(new_plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set initial metadata cache resize config.")
/* Prepare the driver property */
driver_prop.driver_id = f->shared->lf->driver_id;
@@ -669,8 +667,6 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
if(H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata write flag")
#endif /* H5_HAVE_PARALLEL */
- if(H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get initial metadata cache resize config")
/* Get the VFD values to cache */
f->shared->maxaddr = H5FD_get_maxaddr(lf);
@@ -754,7 +750,7 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
* The cache might be created with a different number of elements and
* the access property list should be updated to reflect that.
*/
- if(H5AC_create(f, &(f->shared->mdc_initCacheCfg), &(f->shared->mdc_initCacheImageCfg)) < 0)
+ if(H5AC_create(f, &(f->shared->mdc_initCacheCfg)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create metadata cache")
/* Create the file's "open object" information */
@@ -833,7 +829,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
if(H5F__flush_phase1(f, dxpl_id) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 1)")
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
/* Notify the metadata cache that the file is about to be closed.
* This allows the cache to set up for creating a metadata cache
@@ -849,8 +845,8 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
*/
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
if(H5F__flush_phase2(f, dxpl_id, TRUE) < 0)
- /* Push error, but keep going */
- HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 2)")
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
/* With the shutdown modifications, the contents of the metadata cache
* should be clean at this point, with the possible exception of the
@@ -869,7 +865,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
} /* end if */
/* With the shutdown modifications, the contents of the metadata cache
- * should be clean at this point, with the possible exception of the
+ * should be clean at this point, with the possible exception of the
* the superblock and superblock extension.
*
* Verify this.
@@ -892,21 +888,18 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* (assuming they are persistent). In this case, closing the
* free space managers should have no effect on EOA.
*
- * -- JRM
+ * -- JRM
*/
if(H5F_ACC_RDWR & H5F_INTENT(f)) {
if(H5MF_close(f, dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file free space info")
- /* at this point, only the superblock and superblock
+ /* at this point, only the superblock and superblock
* extension should be dirty.
*/
HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
- /* Flush the file again (if requested), as shutting down the
- * free space manager may dirty some data structures again.
- */
if(flush) {
/* Clear status_flags */
f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_WRITE_ACCESS);
@@ -921,7 +914,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* so that the eoa value corresponds to the end of the
* space written to in the file.
*
- * At most, this should change the superblock or the
+ * At most, this should change the superblock or the
* superblock extension messages.
*/
if(H5MF_free_aggrs(f, dxpl_id) < 0)
@@ -933,7 +926,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
- /* at this point, only the superblock and superblock
+ /* at this point, only the superblock and superblock
* extension should be dirty.
*/
HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
@@ -955,7 +948,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
f->shared->sblock = NULL;
} /* end if */
- /* with the possible exception of the superblock and superblock
+ /* with the possible exception of the superblock and superblock
* extension, the metadata cache should be clean at this point.
*
* Verify this.
@@ -1285,10 +1278,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
shared = file->shared;
lf = shared->lf;
- /* Get the file access property list, for future queries */
- if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
-
/*
* Read or write the file superblock, depending on whether the file is
* empty or not.
@@ -1322,6 +1311,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read root group")
} /* end if */
+ /* Get the file access property list, for future queries */
+ if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
+
/*
* Decide the file close degree. If it's the first time to open the
* file, set the degree to access property list value; if it's the
@@ -1529,7 +1522,7 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* Flush file buffers to disk. */
if(H5FD_flush(f->shared->lf, dxpl_id, closing) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "low level flush failed")
+ HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "low level flush failed")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__flush_phase2() */
@@ -2678,33 +2671,3 @@ H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t cmr)
} /* H5F_set_coll_md_read() */
#endif /* H5_HAVE_PARALLEL */
-
-/*-------------------------------------------------------------------------
- * Function: H5F_set_latest_flags
- *
- * Purpose: Set the latest_flags field with a new value.
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: Quincey Koziol
- * 4/26/16
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5F_set_latest_flags(H5F_t *f, unsigned flags)
-{
- /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity check */
- HDassert(f);
- HDassert(f->shared);
- HDassert(0 == ((~flags) & H5F_LATEST_ALL_FLAGS));
-
- f->shared->latest_flags = flags;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5F_set_latest_flags() */
-
diff --git a/src/H5Fio.c b/src/H5Fio.c
index afe1278..e215666 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -302,7 +302,7 @@ H5F__evict_cache_entries(H5F_t *f, hid_t dxpl_id)
#ifndef NDEBUG
{
unsigned status = 0;
- uint32_t cur_num_entries;
+ int32_t cur_num_entries;
/* Retrieve status of the superblock */
if(H5AC_get_entry_status(f, (haddr_t)0, &status) < 0)
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 93a3978..11665f4 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -287,12 +287,6 @@ struct H5F_file_t {
/* metadata cache. This structure is */
/* fixed at creation time and should */
/* not change thereafter. */
- H5AC_cache_image_config_t
- mdc_initCacheImageCfg; /* initial configuration for the */
- /* generate metadata cache image on */
- /* close option. This structure is */
- /* fixed at creation time and should */
- /* not change thereafter. */
hbool_t use_mdc_logging; /* Set when metadata logging is desired */
hbool_t start_mdc_log_on_access; /* set when mdc logging should */
/* begin on file access/create */
@@ -375,6 +369,9 @@ H5FL_EXTERN(H5F_t);
/* Declare a free list to manage the H5F_file_t struct */
H5FL_EXTERN(H5F_file_t);
+H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
+
/******************************/
/* Package Private Prototypes */
@@ -403,8 +400,7 @@ H5_DLL herr_t H5F__super_free(H5F_super_t *sblock);
/* Superblock extension related routines */
H5_DLL herr_t H5F_super_ext_open(H5F_t *f, haddr_t ext_addr, H5O_loc_t *ext_ptr);
-H5_DLL herr_t H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id,
- void *mesg, hbool_t may_create, unsigned mesg_flags);
+H5_DLL herr_t H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_t may_create);
H5_DLL herr_t H5F_super_ext_remove_msg(H5F_t *f, hid_t dxpl_id, unsigned id);
H5_DLL herr_t H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
hbool_t was_created);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 7d288fa..bcc56c6 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -317,7 +317,6 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (((F)->shared->grp_btree_shared = (RC)) ? SUCCEED : FAIL)
#define H5F_USE_TMP_SPACE(F) ((F)->shared->use_tmp_space)
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->tmp_addr, (ADDR)))
-#define H5F_SET_LATEST_FLAGS(F, FL) ((F)->shared->latest_flags = (FL))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) ((F)->coll_md_read)
#endif /* H5_HAVE_PARALLEL */
@@ -368,7 +367,6 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (H5F_set_grp_btree_shared((F), (RC)))
#define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F))
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR)))
-#define H5F_SET_LATEST_FLAGS(F, FL) (H5F_set_latest_flags((F), (FL)))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F))
#endif /* H5_HAVE_PARALLEL */
@@ -483,7 +481,6 @@
#define H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME "evict_on_close_flag" /* Whether or not the metadata cache will evict objects on close */
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_NAME "core_write_tracking_page_size" /* The page size in kiB when core VFD write tracking is enabled */
#define H5F_ACS_COLL_MD_WRITE_FLAG_NAME "collective_metadata_write" /* property indicating whether metadata writes are done collectively or not */
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME "mdc_initCacheImageCfg" /* Initial metadata cache image creation configuration */
/* ======================== File Mount properties ====================*/
#define H5F_MNT_SYM_LOCAL_NAME "local" /* Whether absolute symlinks local to file. */
@@ -703,7 +700,6 @@ H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f);
H5_DLL herr_t H5F_set_grp_btree_shared(H5F_t *f, struct H5UC_t *rc);
H5_DLL hbool_t H5F_use_tmp_space(const H5F_t *f);
H5_DLL hbool_t H5F_is_tmp_addr(const H5F_t *f, haddr_t addr);
-H5_DLL herr_t H5F_set_latest_flags(H5F_t *f, unsigned flags);
#ifdef H5_HAVE_PARALLEL
H5_DLL H5P_coll_md_read_flag_t H5F_coll_md_read(const H5F_t *f);
H5_DLL void H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t flag);
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index c57a821..a79da75 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -246,7 +246,6 @@ H5_DLL herr_t H5Fstart_swmr_write(hid_t file_id);
H5_DLL ssize_t H5Fget_free_sections(hid_t file_id, H5F_mem_t type,
size_t nsects, H5F_sect_info_t *sect_info/*out*/);
H5_DLL herr_t H5Fclear_elink_file_cache(hid_t file_id);
-H5_DLL herr_t H5Fset_latest_format(hid_t file_id, hbool_t latest_format);
H5_DLL herr_t H5Fstart_mdc_logging(hid_t file_id);
H5_DLL herr_t H5Fstop_mdc_logging(hid_t file_id);
H5_DLL herr_t H5Fget_mdc_logging_status(hid_t file_id,
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 3b86dae..893ce26 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -240,7 +240,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+herr_t
H5F__update_super_ext_driver_msg(H5F_t *f, hid_t dxpl_id)
{
H5F_super_t *sblock; /* Pointer to the super block */
@@ -290,7 +290,7 @@ H5F__update_super_ext_driver_msg(H5F_t *f, hid_t dxpl_id)
*/
drvinfo.len = driver_size;
drvinfo.buf = dbuf;
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "unable to update driver info header message")
} /* end if driver_size > 0 */
} /* end if !H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO) */
@@ -692,34 +692,6 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
f->shared->fs_addr[u] = fsinfo.fs_addr[u-1];
} /* end if */
- /* Check for the extension having a 'metadata cache image' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_MDCI_MSG_ID, dxpl_id)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
- if(status) {
- hbool_t rw = ((rw_flags & H5AC__READ_ONLY_FLAG) == 0);
- H5O_mdci_t mdci_msg;
-
- /* if the metadata cache image superblock extension message exists,
- * read its contents and pass the data on to the metadata cache.
- * Given this data, the cache will load and decode the metadata
- * cache image block and load its contents into the cache on
- * the next protect call.
- *
- * Further, if the file is opened R/W, the metadata cache will
- * delete the metadata cache image superblock extension and free
- * the cache image block. Don't do this now as f->shared
- * is not fully setup, which complicates matters.
- */
-
- /* Retrieve the 'metadata cache image message' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_MDCI_MSG_ID, &mdci_msg, dxpl_id))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get metadata cache image message")
-
- /* Indicate to the cache that there's an image to load on first protect call */
- if(H5AC_load_cache_image_on_next_protect(f, mdci_msg.addr, mdci_msg.size, rw) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTLOAD, FAIL, "call to H5AC_load_cache_image_on_next_protect failed");
- } /* end if */
-
/* Close superblock extension */
if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
@@ -765,7 +737,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HDassert(f->shared->sblock == NULL);
f->shared->sblock = sblock;
#endif /* JRM */
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
@@ -861,7 +833,6 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
H5O_loc_t ext_loc; /* Superblock extension object location */
hbool_t need_ext; /* Whether the superblock extension is needed */
hbool_t ext_created = FALSE; /* Whether the extension has been created */
- hbool_t non_default_fs_settings = FALSE; /* Whether the file has non-default free-space settings */
herr_t ret_value = SUCCEED; /* Return Value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
@@ -888,11 +859,6 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
- /* Check for non-default free-space settings */
- if(!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF &&
- f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF))
- non_default_fs_settings = TRUE;
-
/* Bump superblock version if latest superblock version support is enabled */
if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK))
super_vers = HDF5_SUPERBLOCK_VERSION_LATEST;
@@ -902,7 +868,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Bump superblock version to create superblock extension for
* non-default file space strategy or non-default free-space threshold
*/
- else if(non_default_fs_settings)
+ else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
+ f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF)
super_vers = HDF5_SUPERBLOCK_VERSION_2;
/* Check for non-default indexed storage B-tree internal 'K' value
* and set the version # of the superblock to 1 if it is a non-default
@@ -1019,7 +986,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
need_ext = TRUE;
} /* end if */
/* Files with non-default free space settings always need the superblock extension */
- else if(non_default_fs_settings) {
+ else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
+ f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
HDassert(super_vers >= HDF5_SUPERBLOCK_VERSION_2);
need_ext = TRUE;
} /* end if */
@@ -1107,8 +1075,9 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
f->shared->drvinfo_sb_msg_exists = TRUE;
} /* end if */
- /* Check for non-default free-space info settings */
- if(non_default_fs_settings) {
+ /* Check for non-default free space settings */
+ if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
+ f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
H5FD_mem_t type; /* Memory type for iteration */
H5O_fsinfo_t fsinfo; /* Free space manager info message */
@@ -1396,8 +1365,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg,
- hbool_t may_create, unsigned mesg_flags)
+H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_t may_create)
{
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
@@ -1442,7 +1410,7 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg,
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should not exist")
/* Create the message with ID in the superblock extension */
- if(H5O_msg_create(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
+ if(H5O_msg_create(&ext_loc, id, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to create the message in object header")
} /* end if */
else {
@@ -1450,7 +1418,7 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg,
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should exist")
/* Update the message with ID in the superblock extension */
- if(H5O_msg_write(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
+ if(H5O_msg_write(&ext_loc, id, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to write the message in object header")
} /* end else */
diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h
index 1e8ad31..c994da3 100644
--- a/src/H5Gpkg.h
+++ b/src/H5Gpkg.h
@@ -312,6 +312,9 @@ typedef struct H5G_copy_file_ud_t {
*/
H5_DLLVAR H5B_class_t H5B_SNODE[1];
+/* The cache subclass */
+H5_DLLVAR const H5AC_class_t H5AC_SNODE[1];
+
/* The v2 B-tree class for indexing 'name' field on links */
H5_DLLVAR const H5B2_class_t H5G_BT2_NAME[1];
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index ffdac9a..302fe04 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -116,17 +116,14 @@ static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
-static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
- hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
- unsigned *iblock_status, hbool_t *fd_clean, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
- haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
- hbool_t *clean, hbool_t *has_dblocks);
-static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
- hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
- hbool_t *fd_clean, hbool_t *clean, hbool_t *has_iblocks);
+static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
+ hbool_t *clean);
+static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
+ H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean);
+static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
+ H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_dblocks);
+static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
+ H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks);
#endif /* NDEBUG */
@@ -690,7 +687,6 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
#ifndef NDEBUG
{
hbool_t descendants_clean = TRUE;
- hbool_t fd_children_clean = TRUE;
/* Verify that flush dependencies are working correctly. Do this
* by verifying that either:
@@ -705,22 +701,10 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
* constraint is met by default.
*
* Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
- *
- * Note that descendants need not be clean if the pre_serialize call
- * is made during a cache serialization instead of an entry or cache
- * flush.
- *
- * Note also that with the recent change in the definition of flush
- * dependency, not all descendants need be clean -- only direct flush
- * dependency children.
- *
- * Finally, observe that the H5HF__cache_verify_hdr_descendants_clean()
- * call still looks for dirty descendants. At present we do not check
- * this value.
*/
- if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, dxpl_id, hdr, &fd_children_clean, &descendants_clean) < 0)
+ if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, hdr, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
- HDassert(fd_children_clean);
+ HDassert(descendants_clean);
}
#endif /* NDEBUG */
@@ -1192,9 +1176,8 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
* and if so, to move it to real file space before the entry is
* serialized.
*
- * In debug compiles, this function also verifies that all
- * immediate flush dependency children of this indirect block
- * are either clean or are not in cache.
+ * In debug compiles, this function also verifies that all children
+ * of this indirect block are either clean or are not in cache.
*
* Return: Success: SUCCEED
* Failure: FAIL
@@ -1234,12 +1217,10 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
#ifndef NDEBUG
{
hbool_t descendants_clean = TRUE;
- hbool_t fd_children_clean = TRUE;
unsigned iblock_status = 0;
/* verify that flush dependencies are working correctly. Do this
- * by verifying that all immediate flush dependency children of this
- * iblock are clean.
+ * by verifying that all children of this iblock are clean.
*/
if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
@@ -1249,9 +1230,9 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
* there is no need to check to see if it is pinned or protected, or to
* protect it if it is not.
*/
- if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, dxpl_id, iblock->addr, iblock, &iblock_status, &fd_children_clean, &descendants_clean) < 0)
+ if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, iblock, &iblock_status, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
- HDassert(fd_children_clean);
+ HDassert(descendants_clean);
}
#endif /* NDEBUG */
@@ -2604,54 +2585,6 @@ done:
* instance of H5HF_hdr_t are clean. Set *clean to
* TRUE if this is the case, and to FALSE otherwise.
*
- * Update -- 8/24/15
- *
- * With the advent of the metadata cache image feature, it is
- * possible for the pre-serialize and serialize calls to be
- * invoked outside of a flush. While this serialization
- * observes flush dependencies for the order of serialization,
- * the entries are not written to disk, and hence dirty entries
- * remain dirty.
- *
- * To address this, the sanity checks in this function have been
- * updated to treat entries whose images are up to date as clean
- * if a cache serialization is in progress.
- *
- * Update -- 9/29/16
- *
- * The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
- * parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
- *
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
- *
- * This means that the fractal heap is no longer necessarily
- * flushed from the bottom up.
- *
- * For example, it is now possible for a dirty fractal heap
- * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
- * dirty immediate flush dependency children.
- *
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child of both the iblock
- * that points to it and the fractal heap header.
- *
- * As a result of these changes, the functionality of these
- * sanity checking routines has been modified significantly.
- * Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
- * they also walk the data structure, and verify it.
- *
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2661,10 +2594,9 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean)
+H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
+ hbool_t *clean)
{
- hbool_t fd_exists = FALSE; /* whether flush dependency exists. */
haddr_t hdr_addr; /* Address of header */
unsigned hdr_status = 0; /* Header cache entry status */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2676,7 +2608,6 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
HDassert(hdr);
HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
- HDassert(fd_clean);
HDassert(clean);
hdr_addr = hdr->cache_info.addr;
HDassert(hdr_addr == hdr->heap_addr);
@@ -2741,165 +2672,15 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
root_iblock_in_cache = ( (root_iblock_status & H5AC_ES__IN_CACHE) != 0);
HDassert(root_iblock_in_cache || (root_iblock == NULL));
- if(!root_iblock_in_cache) { /* we are done */
- *clean = TRUE;
- *fd_clean = TRUE;
- } /* end if */
- else if((root_iblock_status & H5AC_ES__IS_DIRTY) &&
- (((root_iblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
- (!H5AC_get_serialization_in_progress(f)))) {
- *clean = FALSE;
-
- /* verify that a flush dependency exists between the header and
- * the root inode.
- */
- if(H5AC_flush_dependency_exists(f, hdr->heap_addr, root_iblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
- HDassert(fd_exists);
-
- *fd_clean = FALSE;
- } /* end else-if */
- else { /* must examine children */
- hbool_t unprotect_root_iblock = FALSE;
-
- /* At this point, the root iblock may be pinned, protected,
- * both, or neither, and we may or may not have a pointer
- * to root iblock in memory.
- *
- * Before we call H5HF__cache_verify_iblock_descendants_clean(),
- * we must ensure that the root iblock is either pinned or
- * protected or both, and that we have a pointer to it.
- * Do this as follows:
- */
- if(root_iblock == NULL) { /* we don't have ptr to root iblock */
- if(0 == (root_iblock_status & H5AC_ES__IS_PROTECTED)) {
- /* just protect the root iblock -- this will give us
- * the pointer we need to proceed, and ensure that
- * it is locked into the metadata cache for the
- * duration.
- *
- * Note that the udata is only used in the load callback.
- * While the fractal heap makes heavy use of the udata
- * in this case, since we know that the entry is in cache,
- * we can pass NULL udata.
- *
- * The tag specified in the dxpl we received
- * as a parameter (via dxpl_id) may not be correct.
- * Grab the (hopefully) correct tag from the header,
- * and load it into the dxpl via the H5_BEGIN_TAG and
- * H5_END_TAG macros. Note that any error bracketed by
- * these macros must be reported with HGOTO_ERROR_TAG.
- */
- H5_BEGIN_TAG(dxpl_id, hdr->heap_addr, FAIL)
-
- if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
-
- H5_END_TAG(FAIL)
-
- unprotect_root_iblock = TRUE;
- } /* end if */
- else {
- /* the root iblock is protected, and we have no
- * legitimate way of getting a pointer to it.
- *
- * We square this circle by using the
- * H5AC_get_entry_ptr_from_addr() to get the needed
- * pointer.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided there when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Assuming that the flush dependency code is working
- * as it should, the only reason for the root iblock to
- * be unpinned is if none of its children are in cache.
- * This unfortunately means that if it is protected and
- * not pinned, the fractal heap is in the process of loading
- * or inserting one of its children. The obvious
- * implication is that there is a significant chance that
- * the root iblock is in an unstable state.
- *
- * All this suggests that using
- * H5AC_get_entry_ptr_from_addr() to obtain the pointer
- * to the protected root iblock is questionable here.
- * However, since this is test/debugging code, I expect
- * that we will use this approach until it causes problems,
- * or we think of a better way.
- */
- if(H5AC_get_entry_ptr_from_addr(f, root_iblock_addr, (void **)(&root_iblock)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
- HDassert(root_iblock);
- } /* end else */
- } /* end if */
- else { /* root_iblock != NULL */
- /* we have the pointer to the root iblock. Protect it
- * if it is neither pinned nor protected -- otherwise we
- * are ready to go.
- */
- H5HF_indirect_t * iblock = NULL;
-
- if(((root_iblock_status & H5AC_ES__IS_PINNED) == 0) &&
- ((root_iblock_status & H5AC_ES__IS_PROTECTED) == 0)) {
- /* the root iblock is neither pinned nor protected -- hence
- * we must protect it before we proceed
- *
- * Note that the udata is only used in the load callback.
- * While the fractal heap makes heavy use of the udata
- * in this case, since we know that the entry is in cache,
- * we can pass NULL udata.
- *
- * The tag specified in the dxpl we received
- * as a parameter (via dxpl_id) may not be correct.
- * Grab the (hopefully) correct tag from the header,
- * and load it into the dxpl via the H5_BEGIN_TAG and
- * H5_END_TAG macros. Note that any error bracketed by
- * these macros must be reported with HGOTO_ERROR_TAG.
- */
- H5_BEGIN_TAG(dxpl_id, hdr->heap_addr, FAIL)
-
- if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
-
- H5_END_TAG(FAIL)
-
- unprotect_root_iblock = TRUE;
- HDassert(iblock == root_iblock);
- } /* end if */
- } /* end else */
-
- /* at this point, one way or another, the root iblock is locked
- * in memory for the duration of the call. Do some sanity checks,
- * and then call H5HF__cache_verify_iblock_descendants_clean().
- */
- HDassert(root_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(root_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
-
- if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, hdr->heap_addr, root_iblock, &root_iblock_status, fd_clean, clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify root iblock & descendants clean.")
-
- /* Unprotect the root indirect block if required */
- if(unprotect_root_iblock) {
- HDassert(root_iblock);
- if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
- } /* end if */
- } /* end else */
+ if(!root_iblock_in_cache) /* we are done */
+ *clean = TRUE;
+ else if(root_iblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
} /* end if */
else if((hdr->man_dtable.curr_root_rows == 0) &&
(HADDR_UNDEF != hdr->man_dtable.table_addr)) {
haddr_t root_dblock_addr;
unsigned root_dblock_status = 0;
- hbool_t in_cache;
- hbool_t type_ok;
/* this is scenario 2 -- we have a root dblock */
root_dblock_addr = hdr->man_dtable.table_addr;
@@ -2907,48 +2688,25 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get root dblock status")
if(root_dblock_status & H5AC_ES__IN_CACHE) {
- if(H5AC_verify_entry_type(f, root_dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
- HDassert(in_cache);
- if(!type_ok)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock addr doesn't refer to a dblock?!?")
-
/* If a root dblock is in cache, it must have a flush
- * dependency relationship with the header, and it
- * may not be the parent in any flush dependency
- * relationship.
- *
- * We don't test this fully, but we will verify that
- * the root dblock is a child in a flush dependency
- * relationship with the header.
+ * dependency relationship with the header.
*/
- if(H5AC_flush_dependency_exists(f, hdr->heap_addr, root_dblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
- if(!fd_exists)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock is not a flush dep parent of header.")
-
+ if(0 == (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and not a flush dep child.")
if(0 != (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and is a flush dep parent.")
- *clean = !((root_dblock_status & H5AC_ES__IS_DIRTY) &&
- (((root_dblock_status &
- H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
- (!H5AC_get_serialization_in_progress(f))));
-
- *fd_clean = *clean;
- } /* end if */
- else { /* root dblock not in cache */
- *fd_clean = TRUE;
- *clean = TRUE;
- } /* end else */
+ if(root_dblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
+ } /* end if */
+ else /* root dblock not in cache */
+ *clean = TRUE;
} /* end else-if */
- else {
- /* this is scenario 3 -- the fractal heap is empty, and we
- * have nothing to do.
- */
- *fd_clean = TRUE;
- *clean = TRUE;
- } /* end else */
+ else
+ /* this is scenario 3 -- the fractal heap is empty, and we
+ * have nothing to do.
+ */
+ *clean = TRUE;
done:
FUNC_LEAVE_NOAPI(ret_value)
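The removed comments in the surrounding hunks describe the revised flush dependency rule: a parent entry may be flushed once all of its immediate flush dependency children are clean, regardless of dirtiness further down the tree. A self-contained sketch of that rule in generic C follows; it is not HDF5 internals, and the type and function names are hypothetical.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical model of the revised rule: only immediate flush dependency
 * children are consulted; dirty grandchildren no longer block the flush.
 */
struct fd_entry {
    bool             is_dirty;
    size_t           n_fd_children;
    struct fd_entry **fd_children;   /* immediate flush dependency children */
};

static bool
fd_parent_is_flushable(const struct fd_entry *parent)
{
    size_t i;

    for(i = 0; i < parent->n_fd_children; i++)
        if(parent->fd_children[i]->is_dirty)
            return false;            /* a dirty immediate child blocks the flush */

    return true;                     /* entries below the immediate children are not examined */
}

Under the earlier rule the check would have had to recurse over every descendant, which is why the fractal heap was previously always flushed from the bottom up.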
@@ -2983,40 +2741,6 @@ done:
* H5HF__cache_verify_descendant_iblocks_clean() are
* recursive co-routines.
*
- * Update -- 9/29/16
- *
- * The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
- * parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
- *
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
- *
- * This means that the fractal heap is no longer necessarily
- * flushed from the bottom up.
- *
- * For example, it is now possible for a dirty fractal heap
- * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
- * dirty immediate flush dependency children.
- *
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child of both the iblock
- * that points to it and the fractal heap header.
- *
- * As a result of these changes, the functionality of these
- * sanity checking routines has been modified significantly.
- * Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
- * they also walk the data structure, and verify it.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -3026,9 +2750,8 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
- haddr_t fd_parent_addr, H5HF_indirect_t *iblock, unsigned *iblock_status,
- hbool_t * fd_clean, hbool_t *clean)
+H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, H5HF_indirect_t *iblock,
+ unsigned *iblock_status, hbool_t *clean)
{
hbool_t has_dblocks = FALSE;
hbool_t has_iblocks = FALSE;
@@ -3038,19 +2761,17 @@ H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
/* Sanity checks */
HDassert(f);
- HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
HDassert(iblock_status);
- HDassert(fd_clean);
- HDassert(*fd_clean);
- HDassert(clean); /* note that *clean need not be TRUE */
+ HDassert(clean);
+ HDassert(*clean);
- if((*fd_clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, fd_parent_addr, iblock, fd_clean, clean, &has_dblocks) < 0)
+ if((*clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, iblock, clean, &has_dblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify dblocks clean.")
- if((*fd_clean) && H5HF__cache_verify_descendant_iblocks_clean(f, dxpl_id, fd_parent_addr, iblock, fd_clean, clean, &has_iblocks) < 0)
+ if((*clean) && H5HF__cache_verify_descendant_iblocks_clean(f, iblock, clean, &has_iblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify iblocks clean.")
/* verify that flush dependency setup is plausible */
@@ -3087,53 +2808,6 @@ done:
* during the call. Caller must ensure that this is
* the case before the call.
*
- * Update -- 8/24/15
- *
- * With the advent of the metadata cache image feature, it is
- * possible for the pre-serialize and serialize calls to be
- * invoked outside of a flush. While this serialization
- * observes flush dependencies for the order of serialization,
- * the entries are not written to disk, and hence dirty entries
- * remain dirty.
- *
- * To address this, the sanity checks in this function have been
- * updated to treat entries whose images are up to date as clean
- * if a cache serialization is in progress.
- *
- * Update -- 9/29/16
- *
- * The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
- * parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
- *
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
- *
- * This means that the fractal heap is no longer necessarily
- * flushed from the bottom up.
- *
- * For example, it is now possible for a dirty fractal heap
- * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
- * dirty immediate flush dependency children.
- *
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child of both the iblock
- * that points to it and the fractal heap header.
- *
- * As a result of these changes, the functionality of these
- * sanity checking routines has been modified significantly.
- * Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
- * they also walk the data structure, and verify it.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -3143,82 +2817,53 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
- H5HF_indirect_t *iblock, hbool_t *fd_clean, hbool_t *clean,
- hbool_t *has_dblocks)
+H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
+ hbool_t *clean, hbool_t *has_dblocks)
{
unsigned num_direct_rows;
unsigned max_dblock_index;
unsigned i;
- haddr_t iblock_addr;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(f);
- HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(fd_clean);
- HDassert(*fd_clean);
- HDassert(clean); /* note that *clean need not be true */
+ HDassert(clean);
+ HDassert(*clean);
HDassert(has_dblocks);
i = 0;
num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
HDassert(num_direct_rows <= iblock->nrows);
max_dblock_index = (num_direct_rows * iblock->hdr->man_dtable.cparam.width) - 1;
- iblock_addr = iblock->addr;
- HDassert(H5F_addr_defined(iblock_addr));
-
- while((*fd_clean) && (i <= max_dblock_index)) {
+ while((*clean) && (i <= max_dblock_index)) {
haddr_t dblock_addr;
dblock_addr = iblock->ents[i].addr;
if(H5F_addr_defined(dblock_addr)) {
- hbool_t in_cache;
- hbool_t type_ok;
-
- if(H5AC_verify_entry_type(f, dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
-
- if(in_cache) { /* dblock is in cache */
- hbool_t fd_exists;
- unsigned dblock_status = 0;
-
- if(!type_ok)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock addr doesn't refer to a dblock?!?")
-
- if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
-
- HDassert(dblock_status & H5AC_ES__IN_CACHE);
+ unsigned dblock_status = 0;
+ if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
+ if(dblock_status & H5AC_ES__IN_CACHE) {
*has_dblocks = TRUE;
- if((dblock_status & H5AC_ES__IS_DIRTY) &&
- (((dblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
- (!H5AC_get_serialization_in_progress(f)))) {
+ if(dblock_status & H5AC_ES__IS_DIRTY)
*clean = FALSE;
-
- if(H5AC_flush_dependency_exists(f, fd_parent_addr, dblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
-
- if(fd_exists)
- *fd_clean = FALSE;
- } /* end if */
- /* If a child dblock is in cache, it must have a flush
- * dependency relationship with this iblock. Test this
- * here.
+ /* If a child dblock is in cache, it must have a flush
+ * dependency relationship with this iblock, and it
+ * may not be the parent in any flush dependency
+ * relationship.
*/
- if(H5AC_flush_dependency_exists(f, iblock_addr, dblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
-
- if(!fd_exists)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child of iblock.")
+ if(0 == (dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child.")
+ if(0 != (dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and is a flush dep parent.")
} /* end if */
} /* end if */
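For reference, a minimal caller-side sketch (not part of this patch) of the develop-branch variant removed above: both flags are initialized to TRUE by the caller, *clean is cleared by any dirty child dblock, while *fd_clean is cleared only when that dirty dblock is also a flush dependency child of fd_parent_addr. Variable names and error codes here are illustrative.

hbool_t fd_clean = TRUE;     /* stays TRUE while no dirty flush dep child of fd_parent_addr is found */
hbool_t clean = TRUE;        /* stays TRUE while no dirty child dblock at all is found */
hbool_t has_dblocks = FALSE;

/* hypothetical debug-build call site; fd_parent_addr is the flush
 * dependency parent whose children are being verified */
if(H5HF__cache_verify_iblocks_dblocks_clean(f, fd_parent_addr, iblock,
        &fd_clean, &clean, &has_dblocks) < 0)
    HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify dblocks clean")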
@@ -3251,54 +2896,6 @@ done:
* during the call. Caller must ensure that this is
* the case before the call.
*
- * Update -- 8/24/15
- *
- * With the advent of the metadata cache image feature, it is
- * possible for the pre-serialize and serialize calls to be
- * invoked outside of a flush. While this serialization
- * observes flush dependencies for the order of serialization,
- * the entries are not written to disk, and hence dirty entries
- * remain dirty.
- *
- * To address this, updated the sanity checks in this function
- * to treat entries whose images are up to date as clean if
- * a cache serialization is in progress.
- *
- * Update -- 9/29/16
- *
- * The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
- * flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
- * parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
- *
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
- *
- * This means that the fractal heap is no longer necessarily
- * flushed from the bottom up.
- *
- * For example, it is now possible for a dirty fractal heap
- * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
- * dirty immediate flush dependency children.
- *
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child both of the iblock
- * that points to it, and of the fractal heap header.
- *
- * As a result of these changes, the functionality of these
- * sanity checking routines has been modified significantly.
- * Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
- * they also walk the data structure, and verify it.
- *
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -3308,38 +2905,33 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
- haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
+H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
hbool_t *clean, hbool_t *has_iblocks)
{
unsigned first_iblock_index;
unsigned last_iblock_index;
unsigned num_direct_rows;
unsigned i;
- haddr_t iblock_addr;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(f);
- HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(fd_clean);
- HDassert(*fd_clean);
- HDassert(clean); /* note that *clean need not be true */
+ HDassert(clean);
+ HDassert(*clean);
HDassert(has_iblocks);
num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
HDassert(num_direct_rows <= iblock->nrows);
- iblock_addr = iblock->addr;
first_iblock_index = num_direct_rows * iblock->hdr->man_dtable.cparam.width;
last_iblock_index = (iblock->nrows * iblock->hdr->man_dtable.cparam.width) - 1;
i = first_iblock_index;
- while((*fd_clean) && (i <= last_iblock_index)) {
+ while((*clean) && (i <= last_iblock_index)) {
haddr_t child_iblock_addr = iblock->ents[i].addr;
if(H5F_addr_defined(child_iblock_addr)) {
@@ -3349,157 +2941,9 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
if(child_iblock_status & H5AC_ES__IN_CACHE) {
- hbool_t fd_exists;
-
*has_iblocks = TRUE;
-
- if((child_iblock_status & H5AC_ES__IS_DIRTY) &&
- (((child_iblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
- (!H5AC_get_serialization_in_progress(f)))) {
-
- *clean = FALSE;
-
- if(H5AC_flush_dependency_exists(f, fd_parent_addr, child_iblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
-
- if(fd_exists)
- *fd_clean = FALSE;
- } /* end if */
-
- /* if the child iblock is in cache and *fd_clean is TRUE,
- * we must continue to explore down the fractal heap tree
- * structure to verify that all descendant blocks that are
- * flush dependency children of the entry at parent_addr are
- * either clean, or not in the metadata cache. We do this
- * with a recursive call to
- * H5HF__cache_verify_iblock_descendants_clean().
- * However, we can't make this call unless the child iblock
- * is somehow locked into the cache -- typically via either
- * pinning or protecting.
- *
- * If the child iblock is pinned, we can look up its pointer
- * on the current iblock's pinned child iblock list, and
- * and use that pointer in the recursive call.
- *
- * If the entry is unprotected and unpinned, we simply
- * protect it.
- *
- * If, however, the child iblock is already protected,
- * but not pinned, we have a bit of a problem, as we have
- * no legitimate way of looking up its pointer in memory.
- *
- * To solve this problem, I have added a new metadata cache
- * call to obtain the pointer.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided there when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Assuming that the flush dependency code is working
- * as it should, the only reason for the child entry to
- * be unpinned is if none of its children are in cache.
- * This unfortunately means that if it is protected and
- * not pinned, the fractal heap is in the process of loading
- * or inserting one of its children. The obvious implication
- * is that there is a significant chance that the child
- * iblock is in an unstable state.
- *
- * All this suggests that using the new call to obtain the
- * pointer to the protected child iblock is questionable
- * here. However, since this is test/debugging code, I
- * expect that we will use this approach until it causes
- * problems, or we think of a better way.
- */
- if(*fd_clean) {
- H5HF_indirect_t *child_iblock = NULL;
- hbool_t unprotect_child_iblock = FALSE;
-
- if(0 == (child_iblock_status & H5AC_ES__IS_PINNED)) {
- /* child iblock is not pinned */
- if(0 == (child_iblock_status & H5AC_ES__IS_PROTECTED)) {
- /* child iblock is unprotected, and unpinned */
- /* protect it. Note that the udata is only */
- /* used in the load callback. While the */
- /* fractal heap makes heavy use of the udata */
- /* in this case, since we know that the */
- /* entry is in cache, we can pass NULL udata */
- /* */
- /* The tag specified in the dxpl */
- /* we received as a parameter (via dxpl_id) */
- /* may not be correct. */
- /* */
- /* Grab the (hopefully) correct tag from the */
- /* parent iblock, and load it into the dxpl */
- /* via the H5_BEGIN_TAG and H5_END_TAG */
- /* macros. Note that any error bracketed by */
- /* these macros must be reported with */
- /* HGOTO_ERROR_TAG. */
-
- H5_BEGIN_TAG(dxpl_id, iblock->hdr->heap_addr, FAIL)
-
- if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
- HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
-
- H5_END_TAG(FAIL)
-
- unprotect_child_iblock = TRUE;
- } /* end if */
- else {
- /* child iblock is protected -- use */
- /* H5AC_get_entry_ptr_from_addr() to get a */
- /* pointer to the entry. This is very slimy -- */
- /* come up with a better solution. */
- if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
- HDassert(child_iblock);
- } /* end else */
- } /* end if */
- else {
- /* child iblock is pinned -- look it up in the */
- /* parent iblocks child_iblocks array. */
- HDassert(iblock->child_iblocks);
- child_iblock = iblock->child_iblocks[i - first_iblock_index];
- } /* end else */
-
- /* At this point, one way or another we should have
- * a pointer to the child iblock. Verify that we
- * have the correct one.
- */
- HDassert(child_iblock);
- HDassert(child_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(child_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(child_iblock->addr == child_iblock_addr);
-
- /* now make the recursive call */
- if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, fd_parent_addr, child_iblock, &child_iblock_status, fd_clean, clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify child iblock clean.")
-
- /* if iblock_addr != fd_parent_addr, verify that a flush
- * dependency relationship exists between iblock and
- * the child iblock.
- */
- if(fd_parent_addr != iblock_addr) {
- if(H5AC_flush_dependency_exists(f, iblock_addr, child_iblock_addr, &fd_exists) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
-
- if(!fd_exists)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock is not a flush dep parent of child_iblock.")
- } /* end if */
-
- /* if we protected the child iblock, unprotect it now */
- if(unprotect_child_iblock) {
- if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
- } /* end if */
- } /* end if */
+ if(child_iblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
} /* end if */
} /* end if */
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index 9c1d9a6..6abae65 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -559,6 +559,15 @@ typedef struct H5HF_dblock_cache_ud_t {
/* Package Private Variables */
/*****************************/
+/* H5HF header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_HDR[1];
+
+/* H5HF indirect block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_IBLOCK[1];
+
+/* H5HF direct block inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_DBLOCK[1];
+
/* The v2 B-tree class for tracking indirectly accessed 'huge' objects */
H5_DLLVAR const H5B2_class_t H5HF_HUGE_BT2_INDIR[1];
diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h
index a5c80ea..e566ece 100644
--- a/src/H5HGpkg.h
+++ b/src/H5HGpkg.h
@@ -40,6 +40,9 @@
/* Package Private Variables */
/*****************************/
+/* The cache subclass */
+H5_DLLVAR const H5AC_class_t H5AC_GHEAP[1];
+
/* Declare extern the free list to manage the H5HG_t struct */
H5FL_EXTERN(H5HG_heap_t);
diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h
index 06db696..7075b2a 100644
--- a/src/H5HLpkg.h
+++ b/src/H5HLpkg.h
@@ -39,6 +39,12 @@
/* Package Private Variables */
/*****************************/
+/* The local heap prefix cache subclass */
+H5_DLLVAR const H5AC_class_t H5AC_LHEAP_PRFX[1];
+
+/* The local heap data block cache subclass */
+H5_DLLVAR const H5AC_class_t H5AC_LHEAP_DBLK[1];
+
/* Declare extern the free list to manage the H5HL_free_t struct */
H5FL_EXTERN(H5HL_free_t);
diff --git a/src/H5MF.c b/src/H5MF.c
index 23f128f..3ed6d28 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -273,7 +273,7 @@ H5MF__alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
/* Open an existing free space structure for the file */
if(NULL == (f->shared->fs_man[type] = H5FS_open(f, dxpl_id, f->shared->fs_addr[type],
NELMTS(classes), classes, f, f->shared->alignment, f->shared->threshold)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
/* Set the state for the free space manager to "open", if it is now */
if(f->shared->fs_man[type])
@@ -419,7 +419,7 @@ H5MF__alloc_close(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
/* Close an existing free space structure for the file */
if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free space info")
f->shared->fs_man[type] = NULL;
f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
@@ -548,7 +548,7 @@ HDfprintf(stderr, "%s: Check 2.0\n", FUNC);
/* Allocate from the metadata aggregator (or the VFD) */
if(HADDR_UNDEF == (ret_value = H5MF_aggr_vfd_alloc(f, alloc_type, dxpl_id, size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from aggr/vfd")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from aggr/vfd")
done:
/* Reset the ring in the DXPL */
@@ -663,7 +663,7 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
/* check arguments */
HDassert(f);
if(!H5F_addr_defined(addr) || 0 == size)
- HGOTO_DONE(SUCCEED)
+ HGOTO_DONE(SUCCEED);
HDassert(addr != 0); /* Can't deallocate the superblock :-) */
/* Check for attempting to free space that's a 'temporary' file address */
@@ -712,7 +712,7 @@ HDfprintf(stderr, "%s: Trying to avoid starting up free space manager\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Try to shrink the file or absorb the block into a block aggregator */
if((status = H5MF_try_shrink(f, alloc_type, dxpl_id, addr, size)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTMERGE, FAIL, "can't check for absorbing block")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTMERGE, FAIL, "can't check for absorbing block")
else if(status > 0)
/* Indicate success */
HGOTO_DONE(SUCCEED)
@@ -1222,7 +1222,7 @@ H5MF__close_shrink_eoa(H5F_t *f, hid_t dxpl_id)
if(f->shared->fs_man[type]) {
udata.alloc_type = type;
if((status = H5FS_sect_try_shrink_eoa(f, dxpl_id, f->shared->fs_man[type], &udata)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
else if(status > 0)
eoa_shrank = TRUE;
} /* end if */
@@ -1251,18 +1251,18 @@ done:
* Purpose: Handle any tasks required before the metadata cache
* can serialize or flush the raw data free space manager
* and any metadata free space managers that reside in the
- * raw data free space manager ring.
+ * raw data free space manager ring.
*
* Specifically, any metadata managers that DON'T handle
- * space allocation for free space manager header or section
- * info will reside in the raw data free space manager ring.
- * As of this writing, the plan is to move to only two free space
+ * space allocation for free space manager header or section
+ * info will reside in the raw data free space manager ring.
+ * As of this writing, the plan is to move to only two free space
* managers, one for raw data and one for metadata -- which
* means that only the raw data free space manager will reside
* in the free space manager ring. However, this has not been
* fully implemented yet, so this code must support the
* possibility of multiple metadata free space managers, at most
- * two of which handle free space manager header or section info,
+ * two of which handle free space manager header or section info,
* and thus reside in the metadata free space manager ring.
*
* At present, the task list is:
@@ -1271,20 +1271,20 @@ done:
*
* a) Free both aggregators. Space not at EOA will be
* added to the appropriate free space manager.
- *
+ *
* The raw data aggregator should not be restarted
* after this point. It is possible that the metadata
* aggregator will be.
- *
+ *
* b) Free all file space currently allocated to free
- * space managers.
+ * space managers.
*
* c) Delete the free space manager superblock
* extension message if allocated.
*
* This done, reduce the EOA by moving it to just before
* the last piece of free memory in the file.
- *
+ *
* 2) Ensure that space is allocated for the free space
* manager superblock extension message. Must do this
* now, before reallocating file space for free space
@@ -1309,7 +1309,6 @@ done:
* We will allocate space for free space managers involved
* in the allocation of file space for free space managers
* in H5MF_settle_meta_data_fsm()
- *
* Return: SUCCEED/FAIL
*
* Programmer: John Mainzer
@@ -1349,16 +1348,16 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* a) Free the space in aggregators:
*
- * (for space not at EOF, it may be put into free space managers)
+ * (for space not at EOF, it may be put into free space managers)
*
- * Do this now so that the raw data FSM (and any other FSM that isn't
+ * Do this now so that the raw data FSM (and any other FSM that isn't
* involved in space allocation for FSMs) will have no further activity.
*
* Note that while the raw data aggregator should not be restarted during
* the close process, this need not be the case for the metadata aggregator.
*/
if(H5MF_free_aggrs(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "can't free aggregators")
/* Set the ring type in the DXPL. In most cases, we will
* need H5AC_RING_MDFSM first, so initially set the ring in
@@ -1366,29 +1365,29 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
* needed.
*/
if(H5AC_set_ring(dxpl_id, H5AC_RING_MDFSM, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value(0)")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set ring value(0)")
reset_ring = TRUE;
curr_ring = H5AC_RING_MDFSM;
/* b) Free the file space (if any) allocated to each free space manager.
*
- * Do this to facilitate reduction of the size of the file to the
- * extent possible. We will re-allocate space to free space managers
+ * Do this to facilitate reduction of the size of the file to the
+ * extent possible. We will re-allocate space to free space managers
* that have free space to save after this reduction.
*
* In the case of the raw data free space manager, and any other free
* space manager that does not allocate space for free space managers,
* allocations should be complete at this point, as all raw data should
* have space allocated and be flushed to file at this point. Thus we
- * can examine such free space managers and only re-allocate space for
+ * can examine such free space managers and only re-allocate space for
* them if they contain free space. Do this later in this function after
* the EOA has been reduced to the extent possible.
*
- * For free space managers that allocate file space for free space
- * managers (usually just a single metadata free space manager, but for
- * now at least, free space managers for different types of metadata
+ * For free space managers that allocate file space for free space
+ * managers (usually just a single metadata free space manager, but for
+ * now at least, free space managers for different types of metadata
* are possible), the matter is more ticklish due to the self-
- * referential nature of the problem. These FSMs are dealt with in
+ * referential nature of the problem. These FSMs are dealt with in
* H5MF_settle_meta_data_fsm().
*/
for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
@@ -1404,8 +1403,8 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(!fsm_visited[fsm_type]) {
fsm_visited[fsm_type] = TRUE;
- /* If there is no active FSM for this type, but such a FSM has
- * space allocated in file, open it so that we can free its file
+ /* If there is no active FSM for this type, but such a FSM has
+ * space allocated in file, open it so that we can free its file
* space.
*/
if(NULL == f->shared->fs_man[fsm_type]) {
@@ -1415,7 +1414,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Start up FSM for the file memory type */
if(H5MF__alloc_open(f, dxpl_id, fsm_type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
fsm_opened[fsm_type] = TRUE;
} /* end if */
} /* end if */
@@ -1430,14 +1429,14 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
else
needed_ring = H5AC_RING_RDFSM;
if(needed_ring != curr_ring) {
- if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring)< 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set ring value.")
curr_ring = needed_ring;
} /* end if */
/* Query free space manager info for this type */
if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
/* Check if the free space manager has space in the file */
if(H5F_addr_defined(fs_stat.addr) || H5F_addr_defined(fs_stat.sect_addr)) {
@@ -1446,7 +1445,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
* any free space.
*/
if(H5FS_free(f, f->shared->fs_man[fsm_type], dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
f->shared->fs_addr[fsm_type] = HADDR_UNDEF;
} /* end if */
} /* end if */
@@ -1457,54 +1456,54 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
} /* end if */
} /* end for */
- /* c) Delete the free space manager superblock extension message
+ /* c) Delete the free space manager superblock extension message
* if allocated.
*
* Must do this since the routine that writes / creates superblock
- * extension messages will choke if the target message is
+ * extension messages will choke if the target message is
* unexpectedly either absent or present.
*/
if(H5F_addr_defined(f->shared->sblock->ext_addr))
if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_FSINFO_ID) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
/* As the final element in 1), shrink the EOA for the file */
if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
-
- /* 2) Ensure that space is allocated for the free space manager superblock
- * extension message. Must do this now, before reallocating file space
+
+ /* 2) Ensure that space is allocated for the free space manager superblock
+ * extension message. Must do this now, before reallocating file space
* for free space managers, as it is possible that this allocation may
- * grab the last section in a FSM -- making it unnecessary to
+ * grab the last section in a FSM -- making it unnecessary to
* re-allocate file space for it.
*
* Do this by writing a free space manager superblock extension message.
- *
- * Since no free space manager has file space allocated for it, this
+ *
+ * Since no free space manager has file space allocated for it, this
* message must be invalid since we can't save addresses of FSMs when
* those addresses are unknown. This is OK -- we will write the correct
* values to the message at free space manager shutdown.
*/
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- fsinfo.fs_addr[type - 1] = HADDR_UNDEF;
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ fsinfo.fs_addr[type-1] = HADDR_UNDEF;
fsinfo.strategy = f->shared->fs_strategy;
fsinfo.threshold = f->shared->fs_threshold;
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE, H5O_MSG_NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing fsinfo message to superblock extension")
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
/* 3) Scan all free space managers not involved in allocating
* space for free space managers. For each such free space
- * manager, test to see if it contains free space. If
+ * manager, test to see if it contains free space. If
* it does, allocate file space for its header and section
- * data. If it contains no free space, leave it without
- * allocated file space as there is no need to save it to
+ * data. If it contains no free space, leave it without
+ * allocated file space as there is no need to save it to
* file.
*
* Note that all free space managers in this class should
- * see no further space allocations / deallocations as
- * at this point, all raw data allocations should be
+ * see no further space allocations / deallocations as
+ * at this point, all raw data allocations should be
* finalized, as should all metadata allocations not involving
* free space managers.
*
@@ -1530,11 +1529,11 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(needed_ring != curr_ring) {
if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring)< 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set ring value.")
curr_ring = needed_ring;
} /* end if */
- /* Since there can be a many-to-one mapping from memory types
+ /* Since there can be a many-to-one mapping from memory types
* to free space managers, ensure that we don't visit any FSM
* more than once.
*/
@@ -1556,10 +1555,10 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Query free space manager info for this type */
if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0 )
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
- /* If the free space manager contains section info,
- * allocate space for the header and sinfo (note that
+ /* If the free space manager contains section info,
+ * allocate space for the header and sinfo (note that
* space must not be allocated at present -- verify
* this with assertions).
*/
@@ -1569,18 +1568,18 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Allocate FSM header */
if(H5FS_alloc_hdr(f, f->shared->fs_man[fsm_type], &f->shared->fs_addr[fsm_type], dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocated free-space header")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate free-space header")
/* Allocate FSM section info */
HDassert(!H5F_addr_defined(fs_stat.sect_addr));
HDassert(fs_stat.alloc_sect_size == 0);
if(H5FS_alloc_sect(f, f->shared->fs_man[fsm_type], dxpl_id) < 0 )
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate free-space section info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate free-space section info")
#ifndef NDEBUG
/* Re-Query free space manager info for this type */
if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
HDassert(H5F_addr_defined(fs_stat.addr));
HDassert(H5F_addr_defined(fs_stat.sect_addr));
@@ -1601,7 +1600,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Close any opened FSMs */
if(fsm_opened[fsm_type]) {
if(H5MF__alloc_close(f, dxpl_id, fsm_type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space manager")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't close file free space manager")
fsm_opened[fsm_type] = FALSE;
} /* end if */
} /* end if */
@@ -1619,7 +1618,7 @@ done:
/* Reset the ring in the DXPL */
if(reset_ring)
if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ HDONE_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set property value")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* H5MF_settle_raw_data_fsm() */
@@ -1762,7 +1761,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Set the ring in the dxpl appropriately for subsequent calls */
if(H5AC_set_ring(dxpl_id, H5AC_RING_MDFSM, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set ring value")
reset_ring = TRUE;
#ifndef NDEBUG
@@ -1770,7 +1769,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(hdr_fspace) {
/* Query free space manager info for this type */
if(H5FS_stat_info(f, hdr_fspace, &fs_stat) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get free-space info")
HDassert(!H5F_addr_defined(fs_stat.addr));
HDassert(!H5F_addr_defined(fs_stat.sect_addr));
@@ -1781,7 +1780,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if((sinfo_fspace) && (hdr_fspace != sinfo_fspace)) {
/* Query free space manager info for this type */
if(H5FS_stat_info(f, sinfo_fspace, &fs_stat) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get free-space info")
HDassert(!H5F_addr_defined(fs_stat.addr));
HDassert(!H5F_addr_defined(fs_stat.sect_addr));
@@ -1789,32 +1788,32 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
} /* end if */
#endif /* NDEBUG */
- /* Free the space in the metadata aggregator. Do this via the
+ /* Free the space in the metadata aggregator. Do this via the
* H5MF_free_aggrs() call. Note that the raw data aggregator must
* have already been freed. Sanity checks for this?
*/
/* (for space not at EOF, it may be put into free space managers) */
if(H5MF_free_aggrs(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "can't free aggregators")
/* Try shrinking the EOA for the file */
if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
/* ******************* PROBLEM: ********************
*
- * If the file has an alignement other than 1, and if
- * the EOA is not a multiple of this alignment, allocating sapce
- * for the section via the VFD info has the potential of generating
- * a fragment that will be added to the free space manager. This
+ * If the file has an alignment other than 1, and if
+ * the EOA is not a multiple of this alignment, allocating space
+ * for the section via the VFD info has the potential of generating
+ * a fragment that will be added to the free space manager. This
* of course undoes everything we have been doing here.
*
- * Need a way around this. Obvious solution is to force the EOA to
- * be a multiple of the alignment.
+ * Need a way around this. Obvious solution is to force the EOA to
+ * be a multiple of the alignment.
*
* Fortunately, alignment is typically 1, so this is a non-issue in
- * most cases. In cases where the alignment is not 1, for now we
- * have decided to drop the fragment on the floor.
+ * most cases. In cases where the alignment is not 1, for now we
+ * have decided to drop the fragment on the floor.
*
* Eventually, we should fix this by modifying the on disk representations
* of free space managers to allow for empty space, so as to bypass the
@@ -1824,40 +1823,40 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* HDassert(f->shared->alignment == 1); */
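As a sketch of the "force the EOA to be a multiple of the alignment" idea raised in the PROBLEM comment above -- not what the code does (the fragment is simply dropped) -- the rounding arithmetic would look roughly like this, assuming eoa was obtained via H5F_get_eoa():

haddr_t aligned_eoa = eoa;   /* eoa: current end of allocation, from H5F_get_eoa() */

/* hypothetical: bump the EOA to the next alignment boundary so that the
 * subsequent H5FD_alloc() call cannot leave an alignment fragment behind */
if(f->shared->alignment > 1 && 0 != (eoa % f->shared->alignment))
    aligned_eoa = eoa + (f->shared->alignment - (eoa % f->shared->alignment));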
- /* The free space manager(s) that handle space allocations for free
- * space managers should be settled now, albeit without file space
+ /* The free space manager(s) that handle space allocations for free
+ * space managers should be settled now, albeit without file space
* allocated to them. To avoid the possibility of changing the sizes
- * of their section info blocks, allocate space for them now at the
+ * of their section info blocks, allocate space for them now at the
* end of file via H5FD_alloc().
*
- * In the past, this issue of allocating space without touching the
- * free space managers has been deal with by calling
- * H5MF_aggr_vfd_alloc(), which in turn calls H5MF_aggr_alloc().
- * This is problematic since (if I read the code correctly) it will
- * re-constitute the metadata aggregator, which will add any leftover
- * space to one of the free space managers when freed.
+ * In the past, this issue of allocating space without touching the
+ * free space managers has been dealt with by calling
+ * H5MF_aggr_vfd_alloc(), which in turn calls H5MF_aggr_alloc().
+ * This is problematic since (if I read the code correctly) it will
+ * re-constitute the metadata aggregator, which will add any left
+ * over space to one of the free space managers when freed.
*
* This is a non-starter, since the entire objective is to settle the
* free space managers.
*
- * Hence the decision to call H5FD_alloc() directly.
- *
- * As discussed in PROBLEM above, if f->shared->alignment is not 1,
+ * Hence the decision to call H5FD_alloc() directly.
+ *
+ * As discussed in PROBLEM above, if f->shared->alignment is not 1,
* this has the possibility of generating a fragment of file space
* that would typically be inserted into one of the free space managers.
*
* This isn't good, but due to schedule pressure, we will just drop
- * the fragment on the floor for now.
+ * the fragment on the floor for now.
*/
if(hdr_fspace)
if(H5FS_alloc_vfd_alloc_hdr_and_section_info(f, dxpl_id, hdr_fspace,
&(f->shared->fs_addr[hdr_fsm_alloc_type])) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate hdr FSM file space")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't vfd allocate hdr FSM file space")
if(sinfo_fspace && (sinfo_fspace != hdr_fspace))
if(H5FS_alloc_vfd_alloc_hdr_and_section_info(f, dxpl_id, sinfo_fspace,
&(f->shared->fs_addr[sinfo_fsm_alloc_type])) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate sinfo FSM file space")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't vfd allocate sinfo FSM file space")
/* Indicate that the FSM was settled successfully */
*fsm_settled = TRUE;
@@ -1866,7 +1865,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
done:
if(reset_ring)
if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ HDONE_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set property value")
FUNC_LEAVE_NOAPI(ret_value)
} /* H5MF_settle_meta_data_fsm() */
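For orientation, a hedged sketch of how the two settle routines are meant to be driven at file close; the caller context and error codes are illustrative, and only the prototypes (declared in H5MFprivate.h later in this diff) are taken from the source:

hbool_t fsm_settled = FALSE;   /* set TRUE by the callee if settling was performed */

if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
    HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't settle raw data FSM")

if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
    HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't settle metadata FSM")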
@@ -1928,7 +1927,7 @@ HDfprintf(stderr, "%s: Check 1.0 - f->shared->fs_man[%u] = %p, f->shared->fs_add
HDfprintf(stderr, "%s: Before closing free space manager\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free space info")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free space info")
f->shared->fs_man[type] = NULL;
f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
} /* end if */
@@ -1956,7 +1955,7 @@ HDfprintf(stderr, "%s: Before deleting free space manager\n", FUNC);
/* Delete free space manager for this type */
if(H5FS_delete(f, dxpl_id, tmp_fs_addr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't delete free space manager")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "can't delete free space manager")
/* Shift [back] to closed state */
HDassert(f->shared->fs_state[type] == H5F_FS_STATE_DELETING);
@@ -2114,7 +2113,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
fsinfo.threshold = f->shared->fs_threshold;
/* Write the free space manager message -- message must already exist */
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
/* Final close of free-space managers */
@@ -2137,7 +2136,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
HDassert(f->shared->fs_state[type] == H5F_FS_STATE_OPEN);
if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close free space manager")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't close free space manager")
f->shared->fs_man[type] = NULL;
f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
} /* end if */
diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c
index 1510645..7b5a298 100644
--- a/src/H5MFaggr.c
+++ b/src/H5MFaggr.c
@@ -116,15 +116,15 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
/* Get the EOA for the file -- need for sanity check below */
if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, alloc_type)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, HADDR_UNDEF, "Unable to get eoa")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, HADDR_UNDEF, "Unable to get eoa")
/* Check for overlap into temporary allocation space */
if(H5F_addr_gt((eoa + size), f->shared->tmp_addr))
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "hdr file space alloc will overlap into 'temporary' file space")
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADRANGE, HADDR_UNDEF, "hdr file space alloc will overlap into 'temporary' file space")
/* Allocate space for the header */
if(HADDR_UNDEF == (ret_value = H5FD_alloc(f->shared->lf, dxpl_id, alloc_type, f, size, &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space for hdr")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space for hdr")
/* Sanity check for overlapping into file's temporary allocation space */
HDassert(H5F_addr_le((ret_value + size), f->shared->tmp_addr));
@@ -232,9 +232,9 @@ haddr_t
H5MF_aggr_alloc(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
H5F_blk_aggr_t *other_aggr, H5FD_mem_t type, hsize_t size)
{
- haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of fragment at EOA */
- hsize_t eoa_frag_size = 0; /* Size of fragment at EOA */
- haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */
+ haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of fragment at EOA */
+ hsize_t eoa_frag_size = 0; /* Size of fragment at EOA */
+ haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */
haddr_t ret_value = HADDR_UNDEF; /* Return value */
FUNC_ENTER_NOAPI(HADDR_UNDEF)
@@ -543,8 +543,7 @@ H5MF_aggr_try_extend(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
} /* end else-if */
} /* end else */
} /* end if */
- else {
- /* The aggreator is not at end of file */
+ else { /* The aggregator is not at end of file */
/* Check if aggregator has enough internal space to satisfy the extension. */
if(aggr->size >= extra_requested) {
/* Extend block into aggregator */
diff --git a/src/H5MFprivate.h b/src/H5MFprivate.h
index e258677..330fe80 100644
--- a/src/H5MFprivate.h
+++ b/src/H5MFprivate.h
@@ -51,13 +51,15 @@
/* File space manager routines */
H5_DLL herr_t H5MF_init_merge_flags(H5F_t *f);
-H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size);
+H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space,
+ hsize_t *meta_size);
H5_DLL herr_t H5MF_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5MF_try_close(H5F_t *f, hid_t dxpl_id);
/* File space allocation routines */
H5_DLL haddr_t H5MF_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
-H5_DLL haddr_t H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
+H5_DLL haddr_t H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id,
+ hsize_t size);
H5_DLL haddr_t H5MF_vfd_alloc(H5F_t *f, hid_t dxpl_id, H5FD_mem_t alloc_type,
hsize_t size, hbool_t keep_fragment);
H5_DLL herr_t H5MF_xfree(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
@@ -76,7 +78,7 @@ H5_DLL haddr_t H5MF_alloc_tmp(H5F_t *f, hsize_t size);
H5_DLL herr_t H5MF_free_aggrs(H5F_t *f, hid_t dxpl_id);
H5_DLL htri_t H5MF_aggrs_try_shrink_eoa(H5F_t *f, hid_t dxpl_id);
-/* Free space manager settling routines */
+/* Settling routines */
H5_DLL herr_t H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
H5_DLL herr_t H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
diff --git a/src/H5O.c b/src/H5O.c
index 1b58703..e0c0f0b 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -130,12 +130,11 @@ const H5O_msg_class_t *const H5O_msg_class_g[] = {
H5O_MSG_AINFO, /*0x0015 Attribute information */
H5O_MSG_REFCOUNT, /*0x0016 Object's ref. count */
H5O_MSG_FSINFO, /*0x0017 Free-space manager info */
- H5O_MSG_MDCI, /*0x0018 Metadata cache image */
- H5O_MSG_UNKNOWN, /*0x0019 Placeholder for unknown message */
+ H5O_MSG_UNKNOWN, /*0x0018 Placeholder for unknown message */
#ifdef H5O_ENABLE_BOGUS
- H5O_MSG_BOGUS_INVALID, /*0x001A "Bogus invalid" (for testing) */
+ H5O_MSG_BOGUS_INVALID, /*0x0019 "Bogus invalid" (for testing) */
#else /* H5O_ENABLE_BOGUS */
- NULL, /*0x001A "Bogus invalid" (for testing) */
+ NULL, /*0x0019 "Bogus invalid" (for testing) */
#endif /* H5O_ENABLE_BOGUS */
};
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index e1fa0be..ef49535 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -31,7 +31,7 @@
#define H5O_NMESGS 8 /*initial number of messages */
#define H5O_NCHUNKS 2 /*initial number of chunks */
#define H5O_MIN_SIZE 22 /* Min. obj header data size (must be big enough for a message prefix and a continuation message) */
-#define H5O_MSG_TYPES 27 /* # of types of messages */
+#define H5O_MSG_TYPES 26 /* # of types of messages */
#define H5O_MAX_CRT_ORDER_IDX 65535 /* Max. creation order index value */
/* Versions of object header structure */
@@ -419,6 +419,12 @@ typedef struct H5O_chk_cache_ud_t {
} H5O_chk_cache_ud_t;
+/* H5O object header inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_OHDR[1];
+
+/* H5O object header chunk inherits cache-like properties from H5AC */
+H5_DLLVAR const H5AC_class_t H5AC_OHDR_CHK[1];
+
/* Header message ID to class mapping */
H5_DLLVAR const H5O_msg_class_t *const H5O_msg_class_g[H5O_MSG_TYPES];
@@ -538,10 +544,7 @@ H5_DLLVAR const H5O_msg_class_t H5O_MSG_REFCOUNT[1];
/* Free-space Manager Info message. (0x0017) */
H5_DLLVAR const H5O_msg_class_t H5O_MSG_FSINFO[1];
-/* Metadata Cache Image message. (0x0018) */
-H5_DLLVAR const H5O_msg_class_t H5O_MSG_MDCI[1];
-
-/* Placeholder for unknown message. (0x0019) */
+/* Placeholder for unknown message. (0x0018) */
H5_DLLVAR const H5O_msg_class_t H5O_MSG_UNKNOWN[1];
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 71c512c..f6df874 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -69,7 +69,6 @@ typedef struct H5O_t H5O_t;
#define H5O_FIRST (-2) /* Operate on first message of type */
/* Flags needed when encoding messages */
-#define H5O_MSG_NO_FLAGS_SET 0x00u
#define H5O_MSG_FLAG_CONSTANT 0x01u
#define H5O_MSG_FLAG_SHARED 0x02u
#define H5O_MSG_FLAG_DONTSHARE 0x04u
@@ -205,8 +204,7 @@ typedef struct H5O_copy_t {
#define H5O_AINFO_ID 0x0015 /* Attribute info message. */
#define H5O_REFCOUNT_ID 0x0016 /* Reference count message. */
#define H5O_FSINFO_ID 0x0017 /* File space info message. */
-#define H5O_MDCI_MSG_ID 0x0018 /* Metadata Cache Image Message */
-#define H5O_UNKNOWN_ID 0x0019 /* Placeholder message ID for unknown message. */
+#define H5O_UNKNOWN_ID 0x0018 /* Placeholder message ID for unknown message. */
/* (this should never exist in a file) */
/*
* Note: Must increment H5O_MSG_TYPES in H5Opkg.h and update H5O_msg_class_g
@@ -216,7 +214,7 @@ typedef struct H5O_copy_t {
*
* (this should never exist in a file)
*/
-#define H5O_BOGUS_INVALID_ID 0x001A /* "Bogus invalid" Message. */
+#define H5O_BOGUS_INVALID_ID 0x0019 /* "Bogus invalid" Message. */
/* Shared object message types.
* Shared objects can be committed, in which case the shared message contains
@@ -796,16 +794,6 @@ typedef struct H5O_fsinfo_t {
haddr_t fs_addr[H5FD_MEM_NTYPES-1]; /* Addresses of free space managers */
} H5O_fsinfo_t;
-/*
- * Metadata Cache Image Message.
- * Contains base address and length of the metadata cache image.
- * (Data structure in memory)
- */
-typedef struct H5O_mdci_t {
- haddr_t addr; /* address of MDC image block */
- hsize_t size; /* size of MDC image block */
-} H5O_mdci_t;
-
/* Typedef for "application" iteration operations */
typedef herr_t (*H5O_operator_t)(const void *mesg/*in*/, unsigned idx,
void *operator_data/*in,out*/);
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index 7ea0fe0..91a0e03 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -225,13 +225,6 @@
#define H5F_ACS_COLL_MD_WRITE_FLAG_ENC H5P__encode_hbool_t
#define H5F_ACS_COLL_MD_WRITE_FLAG_DEC H5P__decode_hbool_t
#endif /* H5_HAVE_PARALLEL */
-/* Definitions for the initial metadata cache image configuration */
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_SIZE sizeof(H5AC_cache_image_config_t)
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF H5AC__DEFAULT_CACHE_IMAGE_CONFIG
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_ENC H5P__facc_cache_image_config_enc
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEC H5P__facc_cache_image_config_dec
-#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_CMP H5P__facc_cache_image_config_cmp
-
/******************/
/* Local Typedefs */
@@ -286,11 +279,6 @@ static herr_t H5P_facc_mdc_log_location_copy(const char *name, size_t size, void
static int H5P_facc_mdc_log_location_cmp(const void *value1, const void *value2, size_t size);
static herr_t H5P_facc_mdc_log_location_close(const char *name, size_t size, void *value);
-/* Metadata cache image property callbacks */
-static int H5P__facc_cache_image_config_cmp(const void *_config1, const void *_config2, size_t H5_ATTR_UNUSED size);
-static herr_t H5P__facc_cache_image_config_enc(const void *value, void **_pp, size_t *size);
-static herr_t H5P__facc_cache_image_config_dec(const void **_pp, void *_value);
-
/*********************/
/* Package Variables */
@@ -358,7 +346,6 @@ static const hbool_t H5F_def_evict_on_close_flag_g = H5F_ACS_EVICT_ON_CLOSE_FLAG
static const H5P_coll_md_read_flag_t H5F_def_coll_md_read_flag_g = H5F_ACS_COLL_MD_READ_FLAG_DEF; /* Default setting for the collective metadata read flag */
static const hbool_t H5F_def_coll_md_write_flag_g = H5F_ACS_COLL_MD_WRITE_FLAG_DEF; /* Default setting for the collective metadata write flag */
#endif /* H5_HAVE_PARALLEL */
-static const H5AC_cache_image_config_t H5F_def_mdc_initCacheImageCfg_g = H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF; /* Default metadata cache image settings */
/*-------------------------------------------------------------------------
@@ -568,12 +555,6 @@ H5P__facc_reg_prop(H5P_genclass_t *pclass)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
#endif /* H5_HAVE_PARALLEL */
- /* Register the initial metadata cache image configuration */
- if(H5P_register_real(pclass, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_SIZE, &H5F_def_mdc_initCacheImageCfg_g,
- NULL, NULL, NULL, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_ENC, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEC,
- NULL, NULL, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_CMP, NULL) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
-
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P__facc_reg_prop() */
@@ -1562,101 +1543,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Pset_mdc_image_config
- *
- * Purpose: Set the initial metadata cache image configuration in the
- * target FAPL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: J. Mainzer
- * Thursday, June 25, 2015
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr)
-{
- H5P_genplist_t *plist; /* Property list pointer */
- herr_t ret_value = SUCCEED; /* return value */
-
- FUNC_ENTER_API(FAIL)
- H5TRACE2("e", "i*x", plist_id, config_ptr);
-
- /* Get the plist structure */
- if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_ACCESS)))
- HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
-
- /* validate the new configuration */
- if(H5AC_validate_cache_image_config(config_ptr) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid metadata cache image configuration")
-
- /* set the modified metadata cache image config */
-
- /* If we ever support multiple versions of H5AC_cache_image_config_t, we
- * will have to test the version and do translation here.
- */
-
- if(H5P_set(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, config_ptr) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set metadata cache image initial config")
-
-done:
- FUNC_LEAVE_API(ret_value)
-} /* H5Pset_mdc_image_config() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5Pget_mdc_image_config
- *
- * Purpose: Retrieve the metadata cache initial image configuration
- * from the target FAPL.
- *
- * Observe that the function will fail if config_ptr is
- * NULL, or if config_ptr->version specifies an unknown
- * version of H5AC_cache_image_config_t.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: J. Mainzer
- * Friday, June 26, 2015
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr)
-{
- H5P_genplist_t *plist; /* Property list pointer */
- herr_t ret_value = SUCCEED; /* return value */
-
- FUNC_ENTER_API(FAIL)
- H5TRACE2("e", "i*x", plist_id, config_ptr);
-
- /* Get the plist structure */
- if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_ACCESS)))
- HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
-
- /* validate the config_ptr */
- if(config_ptr == NULL)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
-
- if(config_ptr->version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown image config version.")
-
- /* If we ever support multiple versions of H5AC_cache_config_t, we
- * will have to get the canonical version here, and then translate
- * to the version of the structure supplied.
- */
-
- /* Get the current initial metadata cache resize configuration */
- if(H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, config_ptr) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get metadata cache initial image config")
-
-done:
- FUNC_LEAVE_API(ret_value)
-} /* H5Pget_mdc_image_config() */
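For context, a hedged sketch of how an application would have used the FAPL property pair removed by this revert; the struct fields follow the H5AC_cache_image_config_t members referenced by the callbacks further below, and the specific values are illustrative only:

hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
H5AC_cache_image_config_t img_cfg;

img_cfg.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
img_cfg.generate_image = TRUE;         /* illustrative: request a cache image on close */
img_cfg.save_resize_status = FALSE;    /* illustrative */
img_cfg.entry_ageout = -1;             /* illustrative placeholder */

if(H5Pset_mdc_image_config(fapl_id, &img_cfg) < 0)
    /* handle the error, e.g. an invalid configuration */;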
-
-
-/*-------------------------------------------------------------------------
* Function: H5Pset_mdc_config
*
* Purpose: Set the initial metadata cache resize configuration in the
@@ -2868,147 +2754,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5P__facc_cache_image_config_cmp
- *
- * Purpose: Compare two cache image configurations.
- *
- * Return: positive if VALUE1 is greater than VALUE2, negative if VALUE2 is
- * greater than VALUE1 and zero if VALUE1 and VALUE2 are equal.
- *
- * Programmer: John Mainzer
- * June 26, 2015
- *
- *-------------------------------------------------------------------------
- */
-static int
-H5P__facc_cache_image_config_cmp(const void *_config1, const void *_config2, size_t H5_ATTR_UNUSED size)
-{
- const H5AC_cache_image_config_t *config1 = (const H5AC_cache_image_config_t *)_config1; /* Create local aliases for values */
- const H5AC_cache_image_config_t *config2 = (const H5AC_cache_image_config_t *)_config2; /* Create local aliases for values */
- int ret_value = 0; /* Return value */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Check for a property being set */
- if(config1 == NULL && config2 != NULL) HGOTO_DONE(-1);
- if(config1 != NULL && config2 == NULL) HGOTO_DONE(1);
-
- if(config1->version < config2->version) HGOTO_DONE(-1);
- if(config1->version > config2->version) HGOTO_DONE(1);
-
- if(config1->generate_image < config2->generate_image) HGOTO_DONE(-1);
- if(config1->generate_image > config2->generate_image) HGOTO_DONE(1);
-
- if(config1->save_resize_status < config2->save_resize_status) HGOTO_DONE(-1);
- if(config1->save_resize_status > config2->save_resize_status) HGOTO_DONE(1);
-
- if(config1->entry_ageout < config2->entry_ageout) HGOTO_DONE(-1);
- if(config1->entry_ageout > config2->entry_ageout) HGOTO_DONE(1);
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5P__facc_cache_image_config_cmp() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5P__facc_cache_image_config_enc
- *
- * Purpose: Callback routine which is called whenever the default
- * cache image config property in the file creation
- * property list is encoded.
- *
- * Return: Success: Non-negative
- * Failure: Negative
- *
- * Programmer: John Mainzer
- * June 26, 2015
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5P__facc_cache_image_config_enc(const void *value, void **_pp, size_t *size)
-{
- const H5AC_cache_image_config_t *config = (const H5AC_cache_image_config_t *)value; /* Create local aliases for value */
- uint8_t **pp = (uint8_t **)_pp;
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Sanity check */
- HDassert(value);
-
- if(NULL != *pp) {
- /* Encode type sizes (as a safety check) */
- *(*pp)++ = (uint8_t)sizeof(unsigned);
-
- INT32ENCODE(*pp, (int32_t)config->version);
-
- H5_ENCODE_UNSIGNED(*pp, config->generate_image);
-
- H5_ENCODE_UNSIGNED(*pp, config->save_resize_status);
-
- INT32ENCODE(*pp, (int32_t)config->entry_ageout);
- } /* end if */
-
- /* Compute encoded size of fixed-size values */
- *size += (1 + (2 * sizeof(unsigned)) + (2 * sizeof(int32_t)));
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5P__facc_cache_image_config_enc() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5P__facc_cache_image_config_dec
- *
- * Purpose: Callback routine which is called whenever the default
- * cache image config property in the file creation property
- * list is decoded.
- *
- * Return: Success: Non-negative
- * Failure: Negative
- *
- * Programmer: John Mainzer
- * June 26, 2015
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5P__facc_cache_image_config_dec(const void **_pp, void *_value)
-{
- H5AC_cache_image_config_t *config = (H5AC_cache_image_config_t *)_value;
- const uint8_t **pp = (const uint8_t **)_pp;
- unsigned enc_size;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(pp);
- HDassert(*pp);
- HDassert(config);
- HDcompile_assert(sizeof(size_t) <= sizeof(uint64_t));
-
- /* Set property to default value */
- HDmemcpy(config, &H5F_def_mdc_initCacheImageCfg_g, sizeof(H5AC_cache_image_config_t));
-
- /* Decode type sizes */
- enc_size = *(*pp)++;
- if(enc_size != sizeof(unsigned))
- HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "unsigned value can't be decoded")
-
- INT32DECODE(*pp, config->version);
-
- H5_DECODE_UNSIGNED(*pp, config->generate_image);
-
- H5_DECODE_UNSIGNED(*pp, config->save_resize_status);
-
- INT32DECODE(*pp, config->entry_ageout);
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5P__facc_cache_image_config_dec() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5P__facc_file_image_info_set
*
* Purpose: Copies a file image property when it's set for a property list
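[Editor's note] The three callbacks removed in the hunk above follow the generic property-list pattern: a compare routine that orders two property values field by field, an encode routine that serializes the struct into a byte stream behind a sizeof(unsigned) sanity byte, and a decode routine that checks that byte and reads the fields back. The following is a minimal standalone sketch of that encode/decode round trip; the cache_image_cfg_t type is a hypothetical stand-in for H5AC_cache_image_config_t, and plain native-endian memcpy is used in place of HDF5's portable INT32ENCODE / H5_ENCODE_UNSIGNED macros.

/* Standalone sketch (not HDF5 source): serialize and deserialize a small
 * config struct the way the removed enc/dec callbacks do -- a leading
 * sizeof(unsigned) sanity byte, then the four fields in order. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {                 /* hypothetical stand-in for H5AC_cache_image_config_t */
    int32_t  version;
    unsigned generate_image;
    unsigned save_resize_status;
    int32_t  entry_ageout;
} cache_image_cfg_t;

static size_t cfg_encode(const cache_image_cfg_t *cfg, uint8_t *buf)
{
    uint8_t *p = buf;

    *p++ = (uint8_t)sizeof(unsigned);                        /* type-size sanity byte   */
    memcpy(p, &cfg->version, sizeof(int32_t));               p += sizeof(int32_t);
    memcpy(p, &cfg->generate_image, sizeof(unsigned));       p += sizeof(unsigned);
    memcpy(p, &cfg->save_resize_status, sizeof(unsigned));   p += sizeof(unsigned);
    memcpy(p, &cfg->entry_ageout, sizeof(int32_t));          p += sizeof(int32_t);

    return (size_t)(p - buf);    /* matches 1 + 2*sizeof(unsigned) + 2*sizeof(int32_t) */
}

static int cfg_decode(const uint8_t *buf, cache_image_cfg_t *cfg)
{
    const uint8_t *p = buf;

    if (*p++ != (uint8_t)sizeof(unsigned))                   /* reject mismatched encodings */
        return -1;
    memcpy(&cfg->version, p, sizeof(int32_t));               p += sizeof(int32_t);
    memcpy(&cfg->generate_image, p, sizeof(unsigned));       p += sizeof(unsigned);
    memcpy(&cfg->save_resize_status, p, sizeof(unsigned));   p += sizeof(unsigned);
    memcpy(&cfg->entry_ageout, p, sizeof(int32_t));

    return 0;
}

int main(void)
{
    cache_image_cfg_t in = {1, 1u, 0u, 100}, out;
    uint8_t           buf[64];
    size_t            n = cfg_encode(&in, buf);

    assert(cfg_decode(buf, &out) == 0);
    assert(out.version == in.version && out.entry_ageout == in.entry_ageout);
    printf("round-tripped %zu bytes\n", n);
    return 0;
}
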
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 5aa8301..c736d7b 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -365,8 +365,6 @@ H5_DLL herr_t H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collectiv
H5_DLL herr_t H5Pset_coll_metadata_write(hid_t plist_id, hbool_t is_collective);
H5_DLL herr_t H5Pget_coll_metadata_write(hid_t plist_id, hbool_t *is_collective);
#endif /* H5_HAVE_PARALLEL */
-H5_DLL herr_t H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr);
-H5_DLL herr_t H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr /*out*/);
/* Dataset creation property list (DCPL) routines */
H5_DLL herr_t H5Pset_layout(hid_t plist_id, H5D_layout_t layout);
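[Editor's note] The two declarations removed from H5Ppublic.h above are the public entry points for the cache-image property. Before this revert, an application on the develop branch could enable metadata cache image generation through the file access property list as sketched below; the H5AC_cache_image_config_t type and the H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION / H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE constants come from the cache-image headers that this commit also removes, so treat those names as assumptions tied to the pre-revert tree.

/* Sketch of application-level use of the removed API, against the
 * pre-revert develop headers.  Error checking omitted for brevity. */
#include "hdf5.h"

int main(void)
{
    H5AC_cache_image_config_t cfg;
    hid_t                     fapl, file;

    cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    cfg.generate_image     = 1;   /* write a cache image on file close        */
    cfg.save_resize_status = 0;   /* don't persist the resize status          */
    cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_mdc_image_config(fapl, &cfg);

    file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* ... create groups, datasets, etc. ... */

    H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}

After this revert is applied, the two calls above no longer compile, since the declarations are gone from H5Ppublic.h along with the property callbacks in H5Pfapl.c.
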
diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h
index 342543d..3b13e23 100644
--- a/src/H5SMpkg.h
+++ b/src/H5SMpkg.h
@@ -256,6 +256,8 @@ H5FL_ARR_EXTERN(H5SM_index_header_t);
H5FL_EXTERN(H5SM_list_t);
H5FL_ARR_EXTERN(H5SM_sohm_t);
+H5_DLLVAR const H5AC_class_t H5AC_SOHM_TABLE[1];
+H5_DLLVAR const H5AC_class_t H5AC_SOHM_LIST[1];
H5_DLLVAR const H5B2_class_t H5SM_INDEX[1];
/****************************/
diff --git a/src/H5win32defs.h b/src/H5win32defs.h
index b419f06..63c3a16 100644
--- a/src/H5win32defs.h
+++ b/src/H5win32defs.h
@@ -75,8 +75,8 @@ struct timezone {
#if (_MSC_VER < 1900)
struct timespec
{
- time_t tv_sec; /* Seconds - >= 0 */
- long tv_nsec; /* Nanoseconds - [0, 999999999] */
+ time_t tv_sec; // Seconds - >= 0
+ long tv_nsec; // Nanoseconds - [0, 999999999]
};
#endif /* MSC_VER < 1900 */
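[Editor's note] The hunk above only flips the comment style inside the Windows shim; the surrounding guard is the interesting part. Visual Studio 2015 (_MSC_VER 1900) was the first MSVC release to ship struct timespec in <time.h>, so the definition is supplied only for older toolchains. A self-contained sketch of that guard, outside the HDF5 headers:

/* Illustrative guard (not the HDF5 header itself): provide struct timespec
 * only when building with an MSVC toolchain older than Visual Studio 2015,
 * which introduced the type in <time.h>. */
#include <time.h>

#if defined(_MSC_VER) && (_MSC_VER < 1900)
struct timespec {
    time_t tv_sec;   /* Seconds - >= 0 */
    long   tv_nsec;  /* Nanoseconds - [0, 999999999] */
};
#endif
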
diff --git a/src/Makefile.am b/src/Makefile.am
index 69b54b4..939e151 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -46,8 +46,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5B.c H5Bcache.c H5Bdbg.c \
H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2internal.c \
H5B2leaf.c H5B2stat.c H5B2test.c \
- H5C.c H5Cdbg.c H5Cepoch.c H5Cimage.c H5Clog.c H5Cprefetched.c \
- H5Cquery.c H5Ctag.c H5Ctest.c \
+ H5C.c H5Cdbg.c H5Cepoch.c H5Clog.c H5Cquery.c H5Ctag.c H5Ctest.c \
H5CS.c \
H5D.c H5Dbtree.c H5Dbtree2.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \
H5Ddeprec.c H5Dearray.c H5Defl.c H5Dfarray.c H5Dfill.c H5Dint.c \
@@ -82,8 +81,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5MF.c H5MFaggr.c H5MFdbg.c H5MFsection.c \
H5MM.c H5MP.c H5MPtest.c \
H5O.c H5Oainfo.c H5Oalloc.c H5Oattr.c \
- H5Oattribute.c H5Obogus.c H5Obtreek.c H5Ocache.c H5Ocache_image.c \
- H5Ochunk.c \
+ H5Oattribute.c H5Obogus.c H5Obtreek.c H5Ocache.c H5Ochunk.c \
H5Ocont.c H5Ocopy.c H5Odbg.c H5Odrvinfo.c H5Odtype.c H5Oefl.c \
H5Ofill.c H5Oflush.c H5Ofsinfo.c H5Oginfo.c \
H5Olayout.c \