path: root/src/H5Faccum.c
author    hdftest <hdftest@hdfgroup.org>  2018-01-06 23:00:00 (GMT)
committer hdftest <hdftest@hdfgroup.org>  2018-01-06 23:00:00 (GMT)
commit    9c07ff0a0356cab805406b4b314a89dda16d9dcf (patch)
tree      732ec2124dd94ac76cc4cd4efa245208ba4a4057 /src/H5Faccum.c
parent    efa2a470c9d8cf2d584f6b22f5b80cd6687887d0 (diff)
parent    7efa31d45e4dc03f17f28f0e8a21aeeabd213e86 (diff)
download  hdf5-9c07ff0a0356cab805406b4b314a89dda16d9dcf.zip
          hdf5-9c07ff0a0356cab805406b4b314a89dda16d9dcf.tar.gz
          hdf5-9c07ff0a0356cab805406b4b314a89dda16d9dcf.tar.bz2
Merge pull request #850 in HDFFV/hdf5 from ~HDFTEST/hdf5_hft:hdf5_1_10 to hdf5_1_10

* commit '7efa31d45e4dc03f17f28f0e8a21aeeabd213e86':
  Snapshot version 1.10 release 2 (snap7)
  Snapshot version 1.10 release 2 (snap6)
Diffstat (limited to 'src/H5Faccum.c')
0 files changed, 0 insertions, 0 deletions
                           |  120 +-
 src/H5VLint.c             |   30 +-
 src/H5VLnative.c          |    6 +-
 src/H5VLnative_attr.c     |    2 +-
 src/H5VLnative_blob.c     |   10 +-
 src/H5VLnative_dataset.c  |   13 +-
 src/H5VLnative_file.c     |    4 +-
 src/H5VLnative_group.c    |    4 +-
 src/H5VLpassthru.c        |    2 +-
 src/H5VM.c                |   10 +-
 src/H5VMprivate.h         |   50 +-
 src/H5WB.c                |    2 +-
 src/H5Z.c                 |   26 +-
 src/H5Znbit.c             |   10 +-
 src/H5Zscaleoffset.c      |  149 +-
 src/H5Ztrans.c            |   70 +-
 src/H5checksum.c          |    8 +-
 src/H5dbg.c               |    2 +-
 src/H5detect.c            |   54 +-
 src/H5encode.h            |  287 ++
 src/H5make_libsettings.c  |   38 +-
 src/H5mpi.c               |    6 +-
 src/H5private.h           |  595 +--
 src/H5timer.c             |    6 +-
 src/H5trace.c             |   12 +-
 src/H5win32defs.h         |   50 +-
 src/Makefile.am           |    3 +-
 src/libhdf5.settings.in   |    4 +-
 test/cache_common.h       |   56 +-
 test/cache_image.c        |    8 +-
 test/hdfs.c               |   43 +-
 test/ros3.c               |    6 +-
 testpar/t_cache_image.c   |    4 +-
 testpar/t_subfiling_vfd.c |   52 +-
 utils/Makefile.am         |    2 +
 289 files changed, 12802 insertions(+), 15173 deletions(-)
 create mode 100644 src/H5Centry.c
 create mode 100644 src/H5Cint.c
 create mode 100644 src/H5encode.h

diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake
index e518b85..e5192f5 100644
--- a/config/cmake/ConfigureChecks.cmake
+++ b/config/cmake/ConfigureChecks.cmake
@@ -122,9 +122,6 @@ CHECK_INCLUDE_FILE_CONCAT ("globus/common.h" ${HDF_PREFIX}_HAVE_GLOBUS_COMMON_H)
 CHECK_INCLUDE_FILE_CONCAT ("pdb.h" ${HDF_PREFIX}_HAVE_PDB_H)
 CHECK_INCLUDE_FILE_CONCAT ("pthread.h" ${HDF_PREFIX}_HAVE_PTHREAD_H)
 CHECK_INCLUDE_FILE_CONCAT ("srbclient.h" ${HDF_PREFIX}_HAVE_SRBCLIENT_H)
-CHECK_INCLUDE_FILE_CONCAT ("string.h" ${HDF_PREFIX}_HAVE_STRING_H)
-CHECK_INCLUDE_FILE_CONCAT ("strings.h" ${HDF_PREFIX}_HAVE_STRINGS_H)
-CHECK_INCLUDE_FILE_CONCAT ("stdlib.h" ${HDF_PREFIX}_HAVE_STDLIB_H)
 CHECK_INCLUDE_FILE_CONCAT ("dlfcn.h" ${HDF_PREFIX}_HAVE_DLFCN_H)
 CHECK_INCLUDE_FILE_CONCAT ("netinet/in.h" ${HDF_PREFIX}_HAVE_NETINET_IN_H)
 CHECK_INCLUDE_FILE_CONCAT ("netdb.h" ${HDF_PREFIX}_HAVE_NETDB_H)
@@ -447,19 +444,12 @@ CHECK_FUNCTION_EXISTS (fork ${HDF_PREFIX}_HAVE_FORK)
 CHECK_FUNCTION_EXISTS (gethostname ${HDF_PREFIX}_HAVE_GETHOSTNAME)
 CHECK_FUNCTION_EXISTS (getrusage ${HDF_PREFIX}_HAVE_GETRUSAGE)
-CHECK_FUNCTION_EXISTS (lstat ${HDF_PREFIX}_HAVE_LSTAT)
 CHECK_FUNCTION_EXISTS (pread ${HDF_PREFIX}_HAVE_PREAD)
 CHECK_FUNCTION_EXISTS (pwrite ${HDF_PREFIX}_HAVE_PWRITE)
 CHECK_FUNCTION_EXISTS (rand_r ${HDF_PREFIX}_HAVE_RAND_R)
 CHECK_FUNCTION_EXISTS (random ${HDF_PREFIX}_HAVE_RANDOM)
-CHECK_FUNCTION_EXISTS (setsysinfo ${HDF_PREFIX}_HAVE_SETSYSINFO)
-CHECK_FUNCTION_EXISTS (siglongjmp ${HDF_PREFIX}_HAVE_SIGLONGJMP)
-CHECK_FUNCTION_EXISTS (sigsetjmp ${HDF_PREFIX}_HAVE_SIGSETJMP)
-CHECK_FUNCTION_EXISTS (sigprocmask ${HDF_PREFIX}_HAVE_SIGPROCMASK)
-
-CHECK_FUNCTION_EXISTS (srandom ${HDF_PREFIX}_HAVE_SRANDOM)
 CHECK_FUNCTION_EXISTS (strcasestr ${HDF_PREFIX}_HAVE_STRCASESTR)
 CHECK_FUNCTION_EXISTS (strdup ${HDF_PREFIX}_HAVE_STRDUP)
 CHECK_FUNCTION_EXISTS (symlink ${HDF_PREFIX}_HAVE_SYMLINK)

diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in
index ca26447..ae56d2c 100644
--- a/config/cmake/H5pubconf.h.in
+++ b/config/cmake/H5pubconf.h.in
@@ -216,9 +216,6 @@
 /* Define to 1 if you have the `lseek64' function. */
 #cmakedefine H5_HAVE_LSEEK64 @H5_HAVE_LSEEK64@
 
-/* Define to 1 if you have the `lstat' function. */
-#cmakedefine H5_HAVE_LSTAT @H5_HAVE_LSTAT@
-
 /* Define if the map API (H5M) should be compiled */
 #cmakedefine H5_HAVE_MAP_API @H5_HAVE_MAP_API@
 
@@ -280,42 +277,18 @@ compiled */
 #cmakedefine H5_HAVE_ROS3_VFD @H5_HAVE_ROS3_VFD@
 
-/* Define to 1 if you have the `setsysinfo' function. */
-#cmakedefine H5_HAVE_SETSYSINFO @H5_HAVE_SETSYSINFO@
-
-/* Define to 1 if you have the `siglongjmp' function. */
-#cmakedefine H5_HAVE_SIGLONGJMP @H5_HAVE_SIGLONGJMP@
-
-/* Define to 1 if you have the `sigprocmask' function. */
-#cmakedefine H5_HAVE_SIGPROCMASK @H5_HAVE_SIGPROCMASK@
-
-/* Define to 1 if you have the `sigsetjmp' function. */
-#cmakedefine H5_HAVE_SIGSETJMP @H5_HAVE_SIGSETJMP@
-
-/* Define to 1 if you have the `srandom' function. */
-#cmakedefine H5_HAVE_SRANDOM @H5_HAVE_SRANDOM@
-
 /* Define to 1 if you have the `stat64' function. */
 #cmakedefine H5_HAVE_STAT64 @H5_HAVE_STAT64@
 
 /* Define if struct stat has the st_blocks field */
 #cmakedefine H5_HAVE_STAT_ST_BLOCKS @H5_HAVE_STAT_ST_BLOCKS@
 
-/* Define to 1 if you have the header file. */
-#cmakedefine H5_HAVE_STDLIB_H @H5_HAVE_STDLIB_H@
-
 /* Define to 1 if you have the `strcasestr' function. */
 #cmakedefine H5_HAVE_STRCASESTR @H5_HAVE_STRCASESTR@
 
 /* Define to 1 if you have the `strdup' function. */
 #cmakedefine H5_HAVE_STRDUP @H5_HAVE_STRDUP@
 
-/* Define to 1 if you have the header file. */
-#cmakedefine H5_HAVE_STRINGS_H @H5_HAVE_STRINGS_H@
-
-/* Define to 1 if you have the header file. */
-#cmakedefine H5_HAVE_STRING_H @H5_HAVE_STRING_H@
-
 /* Define if struct text_info is defined */
 #cmakedefine H5_HAVE_STRUCT_TEXT_INFO @H5_HAVE_STRUCT_TEXT_INFO@

diff --git a/configure.ac b/configure.ac
index a212825..e7cb8aa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2068,9 +2068,8 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
 AC_SEARCH_LIBS([clock_gettime], [rt posix4])
 AC_CHECK_FUNCS([alarm asprintf clock_gettime fcntl flock fork])
 AC_CHECK_FUNCS([gethostname getrusage gettimeofday])
-AC_CHECK_FUNCS([lstat rand_r random setsysinfo])
-AC_CHECK_FUNCS([siglongjmp sigsetjmp sigprocmask])
-AC_CHECK_FUNCS([srandom strcasestr strdup symlink])
+AC_CHECK_FUNCS([rand_r random])
+AC_CHECK_FUNCS([strcasestr strdup symlink])
 AC_CHECK_FUNCS([tmpfile vasprintf waitpid])
 
 ## ----------------------------------------------------------------------
@@ -4045,10 +4044,10 @@ fi
 ##
 AM_CONDITIONAL([HAVE_SHARED_CONDITIONAL], [test "X$enable_shared" = "Xyes"])
 
-AC_CONFIG_FILES([src/libhdf5.settings
-                 Makefile
+AC_CONFIG_FILES([Makefile
                  doxygen/Doxyfile
                  src/Makefile
+                 src/libhdf5.settings
                  test/Makefile
                  test/H5srcdir_str.h
                  test/test_abort_fail.sh
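The probes removed above all cover headers and functions that any C99/POSIX platform supplies, so the corresponding H5_HAVE_* guards become dead weight. A minimal sketch of the consuming-side pattern that goes away (the macro names match H5pubconf.h.in above; the surrounding code is illustrative, not taken from the library):

    /* Before: includes guarded by configure-time probes */
    #ifdef H5_HAVE_STDLIB_H
    #include <stdlib.h>
    #endif
    #ifdef H5_HAVE_STRING_H
    #include <string.h>
    #endif

    /* After: a C99 baseline is assumed, so the includes are unconditional */
    #include <stdlib.h>
    #include <string.h>

With the guards gone, the configure scripts no longer need to emit the macros at all, which is exactly what the ConfigureChecks.cmake and configure.ac hunks above delete.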
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 7187147..92c74f7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -17,6 +17,7 @@ set (H5_SOURCES
 set (H5_HDRS
     ${HDF5_SRC_DIR}/hdf5.h
     ${HDF5_SRC_DIR}/H5api_adpt.h
+    ${HDF5_SRC_DIR}/H5encode.h
     ${HDF5_SRC_DIR}/H5public.h
     #${HDF5_SRC_DIR}/H5version.h
     #${HDF5_SRC_DIR}/H5overflow.h
@@ -78,8 +79,10 @@ IDE_GENERATED_PROPERTIES ("H5B2" "${H5B2_HDRS}" "${H5B2_SOURCES}" )
 set (H5C_SOURCES
     ${HDF5_SRC_DIR}/H5C.c
     ${HDF5_SRC_DIR}/H5Cdbg.c
+    ${HDF5_SRC_DIR}/H5Centry.c
     ${HDF5_SRC_DIR}/H5Cepoch.c
     ${HDF5_SRC_DIR}/H5Cimage.c
+    ${HDF5_SRC_DIR}/H5Cint.c
     ${HDF5_SRC_DIR}/H5Clog.c
     ${HDF5_SRC_DIR}/H5Clog_json.c
     ${HDF5_SRC_DIR}/H5Clog_trace.c
@@ -1409,17 +1412,7 @@ endif ()
 # Option to build documentation
 #-----------------------------------------------------------------------------
 if (DOXYGEN_FOUND)
-# This cmake function requires that the non-default doxyfile settings are provided with set (DOXYGEN_xxx) commands
-# In addition the doxyfile aliases @INCLUDE option is not supported and would need to be provided in a set (DOXYGEN_ALIASES) command.
-# doxygen_add_docs (hdf5lib_doc
-##    ${common_SRCS} ${shared_gen_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS} ${H5_GENERATED_HEADERS} ${HDF5_DOXYGEN_DIR}/dox
-#    ${DOXYGEN_INPUT_DIRECTORY}
-#    ALL
-#    WORKING_DIRECTORY ${HDF5_SRC_DIR}
-#    COMMENT "Generating HDF5 library Source Documentation"
-# )
-
-# This custom target and doxygen/configure work together
+  # This custom target and doxygen/configure work together
   # Replace variables inside @@ with the current values
   add_custom_target (hdf5lib_doc ALL
       COMMAND ${DOXYGEN_EXECUTABLE} ${HDF5_BINARY_DIR}/Doxyfile

diff --git a/src/H5.c b/src/H5.c
index 10d3115..56beb8a 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -70,8 +70,8 @@ static int H5__mpi_delete_cb(MPI_Comm comm, int keyval, void *attr_val, int *fla
 /*****************************/
 
 /* Library incompatible release versions, develop releases are incompatible by design */
-const unsigned VERS_RELEASE_EXCEPTIONS[] = {0};
-const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 0;
+static const unsigned VERS_RELEASE_EXCEPTIONS[] = {0};
+static const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 0;
 
 /* statically initialize block for pthread_once call used in initializing */
 /* the first global mutex */
@@ -145,7 +145,7 @@ H5_init_library(void)
     /* Run the library initialization routine, if it hasn't already run */
     if (H5_INIT_GLOBAL || H5_TERM_GLOBAL)
-        HGOTO_DONE(SUCCEED)
+        HGOTO_DONE(SUCCEED);
 
     /* Set the 'library initialized' flag as early as possible, to avoid
      * possible re-entrancy.
@@ -218,11 +218,11 @@ H5_init_library(void)
          * This must be entered before the library cleanup code so it's
          * executed in LIFO order (i.e., last).
          */
-        (void)HDatexit(H5TS_win32_process_exit);
+        (void)atexit(H5TS_win32_process_exit);
 #endif /* H5_HAVE_THREADSAFE && H5_HAVE_WIN_THREADS */
 
         /* Normal library termination code */
-        (void)HDatexit(H5_term_library);
+        (void)atexit(H5_term_library);
 
         H5_dont_atexit_g = TRUE;
     } /* end if */
@@ -492,7 +492,7 @@ H5_term_library(void)
             H5_debug_open_stream_t *tmp_open_stream;
 
             tmp_open_stream = H5_debug_g.open_stream;
-            (void)HDfclose(H5_debug_g.open_stream->stream);
+            (void)fclose(H5_debug_g.open_stream->stream);
             H5_debug_g.open_stream = H5_debug_g.open_stream->next;
             (void)H5MM_free(tmp_open_stream);
         } /* end while */
@@ -697,7 +697,7 @@ H5__debug_mask(const char *s)
 
     while (s && *s) {
-        if (HDisalpha(*s) || '-' == *s || '+' == *s) {
+        if (isalpha(*s) || '-' == *s || '+' == *s) {
 
             /* Enable or Disable debugging? */
             if ('-' == *s) {
@@ -713,7 +713,7 @@ H5__debug_mask(const char *s)
             } /* end if */
 
             /* Get the name */
-            for (i = 0; HDisalpha(*s); i++, s++)
+            for (i = 0; isalpha(*s); i++, s++)
                 if (i < sizeof pkg_name)
                     pkg_name[i] = *s;
             pkg_name[MIN(sizeof(pkg_name) - 1, i)] = '\0';
@@ -745,8 +745,8 @@ H5__debug_mask(const char *s)
                     fprintf(stderr, "HDF5_DEBUG: ignored %s\n", pkg_name);
             } /* end if-else */
         }
-        else if (HDisdigit(*s)) {
-            int fd = (int)HDstrtol(s, &rest, 0);
+        else if (isdigit(*s)) {
+            int fd = (int)strtol(s, &rest, 0);
             H5_debug_open_stream_t *open_stream;
 
             if ((stream = HDfdopen(fd, "w")) != NULL) {
@@ -754,7 +754,7 @@ H5__debug_mask(const char *s)
                 if (NULL ==
                     (open_stream = (H5_debug_open_stream_t *)H5MM_malloc(sizeof(H5_debug_open_stream_t)))) {
-                    (void)HDfclose(stream);
+                    (void)fclose(stream);
                     return;
                 } /* end if */
@@ -881,7 +881,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
 
     /* Don't check again, if we already have */
     if (checked)
-        HGOTO_DONE(SUCCEED)
+        HGOTO_DONE(SUCCEED);
 
     {
         const char *s; /* Environment string for disabling version check */
@@ -889,8 +889,8 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
 
         /* Allow different versions of the header files and library? */
         s = HDgetenv("HDF5_DISABLE_VERSION_CHECK");
-        if (s && HDisdigit(*s))
-            disable_version_check = (unsigned int)HDstrtol(s, NULL, 0);
+        if (s && isdigit(*s))
+            disable_version_check = (unsigned int)strtol(s, NULL, 0);
     }
 
     /* H5_VERS_MAJOR and H5_VERS_MINOR must match */
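The H5.c hunks above replace the HDisdigit/HDstrtol wrappers with the plain libc calls while keeping the same environment-variable idiom: treat the value as a number only if it begins with a digit. A self-contained sketch of that idiom (a standalone illustration, not code from the library):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned disable_check = 0;
        const char *s = getenv("HDF5_DISABLE_VERSION_CHECK");

        /* Mirror the isdigit()/strtol() pairing in the diff: only parse
         * the value when it starts with a digit; base 0 accepts 0x.. too. */
        if (s && isdigit((unsigned char)*s))
            disable_check = (unsigned)strtol(s, NULL, 0);
        printf("version check disabled: %u\n", disable_check);
        return 0;
    }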
diff --git a/src/H5A.c b/src/H5A.c
index 4bbd799..5f394dd 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -136,7 +136,7 @@ done:
     /* Cleanup on failure */
     if (H5I_INVALID_HID == ret_value)
         if (attr && H5VL_attr_close(vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute")
+            HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5A__create_common() */
@@ -278,7 +278,7 @@ H5Acreate_async(const char *app_file, const char *app_func, unsigned app_line, h
             H5ARG_TRACE10(__func__, "*s*sIui*siiiii", app_file, app_func, app_line, loc_id, attr_name, type_id, space_id, acpl_id, aapl_id, es_id)) < 0) {
         /* clang-format on */
         if (H5I_dec_app_ref(ret_value) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID")
+            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID");
         HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set")
     } /* end if */
@@ -427,7 +427,7 @@ H5Acreate_by_name_async(const char *app_file, const char *app_func, unsigned app
             H5ARG_TRACE12(__func__, "*s*sIui*s*siiiiii", app_file, app_func, app_line, loc_id, obj_name, attr_name, type_id, space_id, acpl_id, aapl_id, lapl_id, es_id)) < 0) {
         /* clang-format on */
         if (H5I_dec_app_ref(ret_value) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID")
+            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID");
         HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set")
     } /* end if */
@@ -471,7 +471,7 @@ done:
     /* Cleanup on failure */
     if (H5I_INVALID_HID == ret_value)
         if (attr && H5VL_attr_close(vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute")
+            HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5A__open_common() */
@@ -590,7 +590,7 @@ H5Aopen_async(const char *app_file, const char *app_func, unsigned app_line, hid
             H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, attr_name, aapl_id, es_id)) < 0) {
         /* clang-format on */
         if (H5I_dec_app_ref(ret_value) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID")
+            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID");
         HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set")
     } /* end if */
@@ -721,7 +721,7 @@ H5Aopen_by_name_async(const char *app_file, const char *app_func, unsigned app_l
             H5ARG_TRACE9(__func__, "*s*sIui*s*siii", app_file, app_func, app_line, loc_id, obj_name, attr_name, aapl_id, lapl_id, es_id)) < 0) {
         /* clang-format on */
         if (H5I_dec_app_ref(ret_value) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID")
+            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID");
         HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set")
     } /* end if */
@@ -861,7 +861,7 @@ H5Aopen_by_idx_async(const char *app_file, const char *app_func, unsigned app_li
             H5ARG_TRACE11(__func__, "*s*sIui*sIiIohiii", app_file, app_func, app_line, loc_id, obj_name, idx_type, order, n, aapl_id, lapl_id, es_id)) < 0) {
         /* clang-format on */
         if (H5I_dec_app_ref(ret_value) < 0)
-            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID")
+            HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, H5I_INVALID_HID, "can't decrement count on attribute ID");
         HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set")
     } /* end if */
@@ -2288,7 +2288,7 @@ H5Aclose_async(const char *app_file, const char *app_func, unsigned app_line, hi
 done:
     if (connector && H5VL_conn_dec_rc(connector) < 0)
-        HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "can't decrement ref count on connector")
+        HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "can't decrement ref count on connector");
 
     FUNC_LEAVE_API(ret_value)
 } /* H5Aclose_async() */
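The recurring change in these hunks is purely mechanical: HDONE_ERROR and HGOTO_DONE invocations now carry a trailing semicolon. That convention works because a statement-like macro wrapped in do { ... } while (0) consumes exactly one terminating semicolon and stays safe in unbraced if/else chains. A compile-complete sketch of the idea (DONE_ERROR, record_error, and ret_value are hypothetical stand-ins, not HDF5's actual macro body):

    #include <stdio.h>

    static int ret_value;

    /* hypothetical stand-in for an error-stack push */
    static void record_error(const char *msg) { fprintf(stderr, "%s\n", msg); }

    #define DONE_ERROR(msg)    \
        do {                   \
            record_error(msg); \
            ret_value = -1;    \
        } while (0)

    static void demo(int rc)
    {
        if (rc < 0)
            DONE_ERROR("can't close attribute"); /* the semicolon the diff adds */
        else
            ret_value = 0; /* unbraced else still parses, thanks to do-while(0) */
    }

Had the macro expanded to a bare { ... } block instead, the semicolon before the else would be a syntax error, which is why requiring the semicolon and using do-while(0) go together.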
diff --git a/src/H5AC.c b/src/H5AC.c
index 39b9502..09b6835 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -144,8 +144,8 @@ H5AC_init(void)
         const char *s; /* String for environment variables */
 
         s = HDgetenv("H5_COLL_API_SANITY_CHECK");
-        if (s && HDisdigit(*s)) {
-            long env_val = HDstrtol(s, NULL, 0);
+        if (s && isdigit(*s)) {
+            long env_val = strtol(s, NULL, 0);
             H5_coll_api_sanity_check_g = (0 == env_val) ? FALSE : TRUE;
         }
     }
@@ -265,7 +265,6 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
         if (NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)))
             HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxiliary structure")
 
-        aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
         aux_ptr->mpi_comm = mpi_comm;
         aux_ptr->mpi_rank = mpi_rank;
         aux_ptr->mpi_size = mpi_size;
@@ -367,7 +366,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_create_cache_msg(f->shared->cache, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
 #ifdef H5_HAVE_PARALLEL
     /* if there is a failure, try to tidy up the auxiliary structure */
@@ -379,8 +378,7 @@ done:
                 H5SL_close(aux_ptr->c_slist_ptr);
             if (aux_ptr->candidate_slist_ptr != NULL)
                 H5SL_close(aux_ptr->candidate_slist_ptr);
-            aux_ptr->magic = 0;
-            aux_ptr       = H5FL_FREE(H5AC_aux_t, aux_ptr);
+            aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
         } /* end if */
     } /* end if */
 #endif /* H5_HAVE_PARALLEL */
@@ -442,9 +440,6 @@ H5AC_dest(H5F_t *f)
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
     if (aux_ptr) {
-        /* Sanity check */
-        assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
-
         /* If the file was opened R/W, attempt to flush all entries
          * from rank 0 & Bcast clean list to other ranks.
          *
@@ -500,8 +495,7 @@ H5AC_dest(H5F_t *f)
             H5SL_close(aux_ptr->candidate_slist_ptr);
         } /* end if */
 
-        aux_ptr->magic = 0;
-        aux_ptr        = H5FL_FREE(H5AC_aux_t, aux_ptr);
+        aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
     } /* end if */
 #endif /* H5_HAVE_PARALLEL */
@@ -539,7 +533,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_evict_cache_msg(f->shared->cache, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_evict() */
@@ -577,7 +571,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_expunge_entry_msg(f->shared->cache, addr, type->id, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_expunge_entry() */
@@ -629,7 +623,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_flush_cache_msg(f->shared->cache, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_flush() */
@@ -760,7 +754,7 @@ done:
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_insert_entry_msg(f->shared->cache, addr, type->id, flags,
                                            ((H5C_cache_entry_t *)thing)->size, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_insert_entry() */
@@ -841,7 +835,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_mark_entry_dirty_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_mark_entry_dirty() */
@@ -891,7 +885,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_mark_entry_clean_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_mark_entry_clean() */
@@ -930,7 +924,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_mark_unserialized_entry_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_mark_entry_unserialized() */
@@ -968,7 +962,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_mark_serialized_entry_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_mark_entry_serialized() */
@@ -1022,7 +1016,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_move_entry_msg(f->shared->cache, old_addr, new_addr, type->id, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_move_entry() */
@@ -1062,7 +1056,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_pin_entry_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_pin_protected_entry() */
@@ -1219,7 +1213,7 @@ done:
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_create_fd_msg(cache_ptr, (H5AC_info_t *)parent_thing,
                                             (H5AC_info_t *)child_thing, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_create_flush_dependency() */
@@ -1293,7 +1287,7 @@ done:
         if (f->shared->cache->log_info->logging)
             if (H5C_log_write_protect_entry_msg(f->shared->cache, (H5AC_info_t *)thing, type->id, flags,
                                                 fake_ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, NULL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, NULL, "unable to emit log message");
     }
 
     FUNC_LEAVE_NOAPI(ret_value)
@@ -1387,7 +1381,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_resize_entry_msg(cache_ptr, entry_ptr, new_size, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_resize_entry() */
@@ -1427,7 +1421,7 @@ done:
     if (cache_ptr != NULL && cache_ptr->log_info != NULL)
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_unpin_entry_msg(cache_ptr, entry_ptr, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_unpin_entry() */
@@ -1468,7 +1462,7 @@ done:
         if (cache_ptr->log_info->logging)
             if (H5C_log_write_destroy_fd_msg(cache_ptr, (H5AC_info_t *)parent_thing,
                                              (H5AC_info_t *)child_thing, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_destroy_flush_dependency() */
@@ -1559,7 +1553,7 @@ H5AC_unprotect(H5F_t *f, const H5AC_class_t *type, haddr_t addr, void *thing, un
                 /* If we fail to log the deleted entry, push an error but still
                  * participate in a possible sync point ahead
                  */
-                HDONE_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed")
+                HDONE_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed");
             }
         }
     } /* end if */
@@ -1579,7 +1573,7 @@ done:
     /* If currently logging, generate a message */
     if (f->shared->cache->log_info->logging)
         if (H5C_log_write_unprotect_entry_msg(f->shared->cache, addr, type->id, flags, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_unprotect() */
@@ -1606,15 +1600,6 @@ H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr, H5AC_cache_config_t *
     if ((cache_ptr == NULL) || (config_ptr == NULL) ||
         (config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION))
         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry")
-#ifdef H5_HAVE_PARALLEL
-    {
-        H5AC_aux_t *aux_ptr;
-
-        aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
-        if ((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry")
-    }
-#endif /* H5_HAVE_PARALLEL */
 
     /* Retrieve the configuration */
     if (H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0)
@@ -1791,15 +1776,6 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, const H5AC_cache_config_t *
     if (cache_ptr == NULL)
         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry")
-#ifdef H5_HAVE_PARALLEL
-    {
-        H5AC_aux_t *aux_ptr;
-
-        aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
-        if ((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry")
-    }
-#endif /* H5_HAVE_PARALLEL */
 
     /* Validate external configuration */
     if (H5AC_validate_config(config_ptr) != SUCCEED)
@@ -1852,7 +1828,7 @@ done:
     /* If currently logging, generate a message */
     if (cache_ptr->log_info->logging)
         if (H5C_log_write_set_cache_config_msg(cache_ptr, config_ptr, ret_value) < 0)
-            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+            HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_set_cache_auto_resize_config() */
@@ -2019,8 +1995,6 @@ H5AC__check_if_write_permitted(const H5F_t
     assert(f->shared->cache != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
     if (aux_ptr != NULL) {
-        assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
-
         if ((aux_ptr->mpi_rank == 0) ||
             (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
             write_permitted = aux_ptr->write_permitted;
@@ -2215,14 +2189,15 @@ done:
 /*------------------------------------------------------------------------------
  * Function:    H5AC_evict_tagged_metadata()
  *
- * Purpose:     Wrapper for cache level function which flushes all metadata
+ * Purpose:     Wrapper for cache level function which evicts all metadata
  *              that contains the specific tag.
  *
- * Return:      SUCCEED on success, FAIL otherwise.
- *
- * Programmer:  Mike McGreevy
- *              May 19, 2010
+ *              The match_global parameter determines if the global file
+ *              data (e.g., global heaps, shared object header messages)
+ *              should be checked. This is false when closing objects
+ *              and true when flushing.
  *
+ * Return:      SUCCEED on success, FAIL otherwise.
  *------------------------------------------------------------------------------
  */
 herr_t
@@ -2339,7 +2314,7 @@ H5AC_cork(H5F_t *f, haddr_t obj_addr, unsigned action, hbool_t *corked)
         assert(corked);
 
         if (H5C_get_num_objs_corked(f->shared->cache) == 0) {
             *corked = FALSE;
-            HGOTO_DONE(SUCCEED)
+            HGOTO_DONE(SUCCEED);
         }
     }
@@ -2557,7 +2532,7 @@ done:
     if (cache != NULL && cache->log_info != NULL)
         if (cache->log_info->logging)
             if (H5C_log_write_remove_entry_msg(cache, entry, ret_value) < 0)
-                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+                HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message");
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5AC_remove_entry() */

diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index d2d67be..3be2bc6 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -141,7 +141,6 @@ H5AC__set_sync_point_done_callback(H5C_t *cache_ptr, H5AC_sync_point_done_cb_t s
     assert(cache_ptr);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     aux_ptr->sync_point_done = sync_point_done;
@@ -170,7 +169,6 @@ H5AC__set_write_done_callback(H5C_t *cache_ptr, H5AC_write_done_cb_t write_done)
     assert(cache_ptr);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     aux_ptr->write_done = write_done;
@@ -205,7 +203,6 @@ H5AC_add_candidate(H5AC_t *cache_ptr, haddr_t addr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert(aux_ptr->candidate_slist_ptr != NULL);
@@ -265,7 +262,6 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, had
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank == 0);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert(aux_ptr->candidate_slist_ptr != NULL);
@@ -292,7 +288,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, had
          */
         if (H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0) {
             /* Push an error, but still participate in following MPI_Bcast */
-            HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+            HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.");
         }
         assert(chk_num_entries == num_entries);
         assert(haddr_buf_ptr != NULL);
@@ -394,7 +390,6 @@ H5AC__broadcast_clean_list(H5AC_t *cache_ptr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank == 0);
     assert(aux_ptr->c_slist_ptr != NULL);
@@ -414,7 +409,7 @@ H5AC__broadcast_clean_list(H5AC_t *cache_ptr)
         buf_size = sizeof(haddr_t) * num_entries;
         if (NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size))) {
             /* Push an error, but still participate in following MPI_Bcast */
-            HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
+            HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer");
         }
         else {
             /* Set up user data for callback */
@@ -426,7 +421,7 @@ H5AC__broadcast_clean_list(H5AC_t *cache_ptr)
             /* (Callback also removes the matching entries from the dirtied list) */
             if (H5SL_free(aux_ptr->c_slist_ptr, H5AC__broadcast_clean_list_cb, &udata) < 0) {
                 /* Push an error, but still participate in following MPI_Bcast */
-                HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries")
+                HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries");
             }
         }
@@ -477,7 +472,6 @@ H5AC__construct_candidate_list(H5AC_t *cache_ptr, H5AC_aux_t H5_ATTR_NDEBUG_UNUS
     /* Sanity checks */
     assert(cache_ptr != NULL);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE) || (aux_ptr->mpi_rank == 0));
     assert(aux_ptr->d_slist_ptr != NULL);
@@ -588,7 +582,6 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert(aux_ptr->candidate_slist_ptr != NULL);
     assert(H5SL_count(aux_ptr->candidate_slist_ptr) > 0);
@@ -661,7 +654,6 @@ H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank == 0);
     assert(aux_ptr->d_slist_ptr != NULL);
     assert(aux_ptr->c_slist_ptr != NULL);
@@ -712,7 +704,6 @@ H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     if (aux_ptr->mpi_rank == 0) {
         H5AC_slist_entry_t *slist_entry_ptr;
@@ -786,7 +777,6 @@ H5AC__log_cleaned_entry(const H5AC_info_t *entry_ptr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     if (aux_ptr->mpi_rank == 0) {
         H5AC_slist_entry_t *slist_entry_ptr;
@@ -845,7 +835,6 @@ H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty, unsig
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank == 0);
     assert(aux_ptr->c_slist_ptr != NULL);
@@ -912,7 +901,6 @@ H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     if (aux_ptr->mpi_rank == 0) {
         H5AC_slist_entry_t *slist_entry_ptr;
@@ -1012,7 +1000,6 @@ H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr)
     assert(cache_ptr);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
 
     /* get entry status, size, etc here */
     if (H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache, &entry_dirty, NULL, NULL, NULL, NULL,
@@ -1188,7 +1175,6 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
 
     /* to prevent "messages from the future" we must synchronize all
@@ -1348,7 +1334,6 @@ H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
 
     if (aux_ptr->mpi_rank == 0) {
@@ -1411,7 +1396,7 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr, haddr_t *
         buf_size = sizeof(haddr_t) * num_entries;
         if (NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size))) {
             /* Push an error, but still participate in following MPI_Bcast */
-            HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
+            HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer");
         }
 
         /* Now receive the list of candidate entries */
@@ -1467,7 +1452,6 @@ H5AC__receive_and_apply_clean_list(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank != 0);
 
     /* Retrieve the clean list from process 0 */
@@ -1523,7 +1507,6 @@ H5AC__receive_candidate_list(const H5AC_t *cache_ptr, unsigned *num_entries_ptr,
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->mpi_rank != 0);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert(num_entries_ptr != NULL);
@@ -1603,7 +1586,6 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
 
     /* first construct the candidate list -- initially, this will be in the
@@ -1744,7 +1726,6 @@ H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
 
     /* Query if evictions are allowed */
@@ -1759,7 +1740,7 @@ H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f)
              */
             if (H5AC__construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) <
                 0)
-                HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
+                HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.");
         }
 
         /* propagate and apply candidate list -- all processes */
@@ -1820,7 +1801,6 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
 
     /* To prevent "messages from the future" we must
@@ -1853,7 +1833,7 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
              * in collective operations during following cache entry
              * propagation
              */
-            HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+            HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.");
         }
         else {
             /* this code exists primarily for the test bed -- it allows us to
@@ -1928,7 +1908,6 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
 
     /* Query if evictions are allowed */
@@ -1976,7 +1955,7 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f)
                  * in collective operations during following cache entry
                  * propagation
                  */
-                HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
+                HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.");
            }
            else {
                /* this call exists primarily for the test code -- it is used
@@ -2039,13 +2018,12 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) ||
            (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
 
 #ifdef H5AC_DEBUG_DIRTY_BYTES_CREATION
-    fprintf(stdout, "%d:%s...:%u: (u/uu/i/iu/m/mu) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank, __func__,
-            aux_ptr->dirty_bytes_propagations, aux_ptr->unprotect_dirty_bytes,
+    fprintf(stdout, "%d:%s...:%u: (u/uu/i/iu/m/mu) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank,
+            __func__, aux_ptr->dirty_bytes_propagations, aux_ptr->unprotect_dirty_bytes,
             aux_ptr->unprotect_dirty_bytes_updates, aux_ptr->insert_dirty_bytes,
             aux_ptr->insert_dirty_bytes_updates, aux_ptr->move_dirty_bytes,
             aux_ptr->move_dirty_bytes_updates);
@@ -2165,7 +2143,6 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates, haddr_t *ca
     assert(cache_ptr != NULL);
     aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
     assert(aux_ptr != NULL);
-    assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
     assert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
     assert(aux_ptr->mpi_rank == 0);
     assert(num_candidates > 0);
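The H5ACpkg.h diff that follows deletes the magic field that every one of the asserts removed above was checking. The retired pattern tags a heap structure with a known constant at creation, zeroes it at teardown, and asserts it at each entry point to catch stale or garbage pointers. A miniature of that pattern (struct and function names are illustrative; only the magic value itself comes from the removed H5AC__H5AC_AUX_T_MAGIC definition):

    #include <assert.h>
    #include <stdint.h>

    #define AUX_MAGIC ((uint32_t)0x00D0A01) /* value from the removed #define */

    typedef struct aux_t {
        uint32_t magic; /* set to AUX_MAGIC at creation, zeroed before free */
        int      mpi_rank;
    } aux_t;

    static void validate(const aux_t *aux)
    {
        /* Every entry point asserted the tag; the diff drops the field,
         * this assert, and the magic = 0 poisoning in the free paths. */
        assert(aux->magic == AUX_MAGIC);
    }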
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index beb7ba7..535eabd 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -165,10 +165,6 @@ H5FL_EXTERN(H5AC_aux_t);
  *
  * JRM -- 1/6/15
  *
- * magic:       Unsigned 32 bit integer always set to
- *              H5AC__H5AC_AUX_T_MAGIC.  This field is used to validate
- *              pointers to instances of H5AC_aux_t.
- *
  * mpi_comm:    MPI communicator associated with the file for which the
  *              cache has been created.
  *
@@ -350,52 +346,34 @@ H5FL_EXTERN(H5AC_aux_t);
 
 #ifdef H5_HAVE_PARALLEL
 
-#define H5AC__H5AC_AUX_T_MAGIC (unsigned)0x00D0A01
-
 typedef struct H5AC_aux_t {
-    uint32_t magic;
-
     MPI_Comm mpi_comm;
-
-    int mpi_rank;
-
-    int mpi_size;
+    int      mpi_rank;
+    int      mpi_size;
 
     hbool_t write_permitted;
-
-    size_t dirty_bytes_threshold;
-
-    size_t dirty_bytes;
-
+    size_t  dirty_bytes_threshold;
+    size_t  dirty_bytes;
     int32_t metadata_write_strategy;
 
 #ifdef H5AC_DEBUG_DIRTY_BYTES_CREATION
-
     unsigned dirty_bytes_propagations;
-
     size_t   unprotect_dirty_bytes;
     unsigned unprotect_dirty_bytes_updates;
-
     size_t   insert_dirty_bytes;
     unsigned insert_dirty_bytes_updates;
-
     size_t   move_dirty_bytes;
     unsigned move_dirty_bytes_updates;
-
 #endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
 
     H5SL_t *d_slist_ptr;
-
     H5SL_t *c_slist_ptr;
-
     H5SL_t *candidate_slist_ptr;
 
     void (*write_done)(void);
-
     void (*sync_point_done)(unsigned num_writes, haddr_t *written_entries_tbl);
 
     unsigned p0_image_len;
-
 } H5AC_aux_t; /* struct H5AC_aux_t */
 
 /* Typedefs for debugging function pointers */

diff --git a/src/H5Abtree2.c b/src/H5Abtree2.c
index 2468a98..1de8a27 100644
--- a/src/H5Abtree2.c
+++ b/src/H5Abtree2.c
@@ -292,8 +292,8 @@ H5A__dense_btree2_name_encode(uint8_t *raw, const void *_nrecord, void H5_ATTR_U
     H5MM_memcpy(raw, nrecord->id.id, (size_t)H5O_FHEAP_ID_LEN);
     raw += H5O_FHEAP_ID_LEN;
     *raw++ = nrecord->flags;
-    UINT32ENCODE(raw, nrecord->corder)
-    UINT32ENCODE(raw, nrecord->hash)
+    UINT32ENCODE(raw, nrecord->corder);
+    UINT32ENCODE(raw, nrecord->hash);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* H5A__dense_btree2_name_encode() */
@@ -319,8 +319,8 @@ H5A__dense_btree2_name_decode(const uint8_t *raw, void *_nrecord, void H5_ATTR_U
     H5MM_memcpy(nrecord->id.id, raw, (size_t)H5O_FHEAP_ID_LEN);
     raw += H5O_FHEAP_ID_LEN;
     nrecord->flags = *raw++;
-    UINT32DECODE(raw, nrecord->corder)
-    UINT32DECODE(raw, nrecord->hash)
+    UINT32DECODE(raw, nrecord->corder);
+    UINT32DECODE(raw, nrecord->hash);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* H5A__dense_btree2_name_decode() */
@@ -430,7 +430,7 @@ H5A__dense_btree2_corder_encode(uint8_t *raw, const void *_nrecord, void H5_ATTR
     H5MM_memcpy(raw, nrecord->id.id, (size_t)H5O_FHEAP_ID_LEN);
     raw += H5O_FHEAP_ID_LEN;
     *raw++ = nrecord->flags;
-    UINT32ENCODE(raw, nrecord->corder)
+    UINT32ENCODE(raw, nrecord->corder);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* H5A__dense_btree2_corder_encode() */
@@ -456,7 +456,7 @@ H5A__dense_btree2_corder_decode(const uint8_t *raw, void *_nrecord, void H5_ATTR
     H5MM_memcpy(nrecord->id.id, raw, (size_t)H5O_FHEAP_ID_LEN);
     raw += H5O_FHEAP_ID_LEN;
     nrecord->flags = *raw++;
-    UINT32DECODE(raw, nrecord->corder)
+    UINT32DECODE(raw, nrecord->corder);
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* H5A__dense_btree2_corder_decode() */
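UINT32ENCODE/UINT32DECODE are the same statement-like macros discussed earlier: they serialize a 32-bit value through a moving buffer pointer, and this commit makes their invocations semicolon-terminated. A sketch of what such a macro pair does, assuming a little-endian on-disk layout (illustrative definitions, not the ones shipped in the new H5encode.h):

    #include <stdint.h>

    #define U32_ENCODE(p, v)                                                  \
        do {                                                                  \
            uint32_t _v = (uint32_t)(v);                                      \
            (p)[0] = (uint8_t)(_v & 0xff);        /* least-significant byte */\
            (p)[1] = (uint8_t)((_v >> 8) & 0xff);                             \
            (p)[2] = (uint8_t)((_v >> 16) & 0xff);                            \
            (p)[3] = (uint8_t)((_v >> 24) & 0xff);                            \
            (p) += 4; /* advance the buffer cursor */                         \
        } while (0)

    #define U32_DECODE(p, v)                                                  \
        do {                                                                  \
            (v) = (uint32_t)(p)[0] | ((uint32_t)(p)[1] << 8) |                \
                  ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24);        \
            (p) += 4;                                                         \
        } while (0)

Shifting byte by byte keeps the encoding independent of the host's endianness, which is why B-tree records written this way are portable across platforms.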
done: /* Release resources */ if (shared_fheap && H5HF_close(shared_fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (bt2_name && H5B2_close(bt2_name) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index"); if (bt2_corder && H5B2_close(bt2_corder) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for creation order index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for creation order index"); if (attr_copy) H5O_msg_free(H5O_ATTR_ID, attr_copy); @@ -1208,13 +1208,13 @@ H5A__dense_iterate(H5F_t *f, hid_t loc_id, const H5O_ainfo_t *ainfo, H5_index_t done: /* Release resources */ if (shared_fheap && H5HF_close(shared_fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (bt2 && H5B2_close(bt2) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index"); if (atable.attrs && H5A__attr_release_table(&atable) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_iterate() */ @@ -1274,7 +1274,7 @@ H5A__dense_remove_bt2_cb(const void *_record, void *_udata) done: /* Release resources */ if (bt2_corder && H5B2_close(bt2_corder) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for creation order index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for creation order index"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_remove_bt2_cb() */ @@ -1351,11 +1351,11 @@ H5A__dense_remove(H5F_t *f, const H5O_ainfo_t *ainfo, const char *name) done: /* Release resources */ if (shared_fheap && H5HF_close(shared_fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (bt2_name && H5B2_close(bt2_name) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index"); if (attr_copy) H5O_msg_free_real(H5O_MSG_ATTR, attr_copy); @@ -1477,7 +1477,7 @@ H5A__dense_remove_by_idx_bt2_cb(const void *_record, void *_bt2_udata) done: /* Release resources */ if (bt2 && H5B2_close(bt2) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index"); if (fh_udata.attr) H5O_msg_free(H5O_ATTR_ID, fh_udata.attr); @@ -1596,13 +1596,13 @@ H5A__dense_remove_by_idx(H5F_t 
*f, const H5O_ainfo_t *ainfo, H5_index_t idx_type done: /* Release resources */ if (shared_fheap && H5HF_close(shared_fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (bt2 && H5B2_close(bt2) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for index"); if (atable.attrs && H5A__attr_release_table(&atable) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_remove_by_idx() */ @@ -1681,11 +1681,11 @@ H5A__dense_exists(H5F_t *f, const H5O_ainfo_t *ainfo, const char *name, hbool_t done: /* Release resources */ if (shared_fheap && H5HF_close(shared_fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); if (bt2_name && H5B2_close(bt2_name) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_exists() */ @@ -1812,7 +1812,7 @@ H5A__dense_delete(H5F_t *f, H5O_ainfo_t *ainfo) done: /* Release resources */ if (fheap && H5HF_close(fheap) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_delete() */ diff --git a/src/H5Adeprec.c b/src/H5Adeprec.c index 2aa5205..f996774 100644 --- a/src/H5Adeprec.c +++ b/src/H5Adeprec.c @@ -147,7 +147,7 @@ done: /* Clean up on failure */ if (H5I_INVALID_HID == ret_value) if (attr && H5VL_attr_close(vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute"); FUNC_LEAVE_API(ret_value) } /* end H5Acreate1() */ @@ -212,7 +212,7 @@ done: /* Clean up on failure */ if (H5I_INVALID_HID == ret_value) if (attr && H5VL_attr_close(vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute"); FUNC_LEAVE_API(ret_value) } /* H5Aopen_name() */ @@ -280,7 +280,7 @@ done: /* Clean up on failure */ if (H5I_INVALID_HID == ret_value) if (attr && H5VL_attr_close(vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "can't close attribute"); FUNC_LEAVE_API(ret_value) } /* H5Aopen_idx() */ diff --git a/src/H5Aint.c b/src/H5Aint.c index 62a9592..6a78e0c 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -381,7 +381,7 @@ H5A__create(const H5G_loc_t *loc, const char *attr_name, const 
H5T_t *type, cons done: /* Cleanup on failure */ if (NULL == ret_value && attr && H5A__close(attr)) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI_TAG(ret_value) } /* H5A__create() */ @@ -433,12 +433,12 @@ H5A__create_by_name(const H5G_loc_t *loc, const char *obj_name, const char *attr done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location"); /* Cleanup on failure */ if (ret_value == NULL) if (attr && H5A__close(attr) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__create_by_name() */ @@ -532,7 +532,7 @@ done: /* Cleanup on failure */ if (ret_value == NULL) if (attr && H5A__close(attr) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__open() */ @@ -587,12 +587,12 @@ H5A__open_by_idx(const H5G_loc_t *loc, const char *obj_name, H5_index_t idx_type done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location"); /* Cleanup on failure */ if (ret_value == NULL) if (attr && H5A__close(attr) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__open_by_idx() */ @@ -647,12 +647,12 @@ H5A__open_by_name(const H5G_loc_t *loc, const char *obj_name, const char *attr_n done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't free location"); /* Cleanup on failure */ if (ret_value == NULL) if (attr && H5A__close(attr) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__open_by_name() */ @@ -769,9 +769,9 @@ H5A__read(const H5A_t *attr, const H5T_t *mem_type, void *buf) done: /* Release resources */ if (src_id >= 0 && H5I_dec_ref(src_id) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object") + HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object"); if (dst_id >= 0 && H5I_dec_ref(dst_id) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object") + HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object"); if (tconv_buf) tconv_buf = H5FL_BLK_FREE(attr_buf, tconv_buf); if (bkg_buf) @@ -907,9 +907,9 @@ H5A__write(H5A_t *attr, const H5T_t *mem_type, const void *buf) done: /* Release resources */ if (src_id >= 0 && H5I_dec_ref(src_id) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object") + HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object"); if (dst_id >= 0 && H5I_dec_ref(dst_id) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object") + HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object"); if (tconv_buf) tconv_buf = H5FL_BLK_FREE(attr_buf, tconv_buf); 
if (bkg_buf) @@ -996,7 +996,7 @@ H5A_get_space(H5A_t *attr) done: if (H5I_INVALID_HID == ret_value && ds && H5S_close(ds) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "unable to release dataspace") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "unable to release dataspace"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A_get_space() */ @@ -1057,7 +1057,7 @@ H5A__get_type(H5A_t *attr) done: if (H5I_INVALID_HID == ret_value) if (dt && H5T_close(dt) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "unable to release datatype") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, H5I_INVALID_HID, "unable to release datatype"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__get_type() */ @@ -1193,7 +1193,7 @@ H5A__copy(H5A_t *_new_attr, const H5A_t *old_attr) done: if (ret_value == NULL) if (allocated_attr && new_attr && H5A__close(new_attr) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__copy() */ @@ -1229,12 +1229,12 @@ H5A__shared_free(H5A_t *attr) } if (attr->shared->dt) { if (H5T_close_real(attr->shared->dt) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release datatype info") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release datatype info"); attr->shared->dt = NULL; } if (attr->shared->ds) { if (H5S_close(attr->shared->ds) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release dataspace info") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release dataspace info"); attr->shared->ds = NULL; } if (attr->shared->data) @@ -1445,7 +1445,7 @@ H5A__exists_by_name(H5G_loc_t loc, const char *obj_name, const char *attr_name, done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__exists_by_name() */ @@ -1675,7 +1675,7 @@ H5A__dense_build_table(H5F_t *f, const H5O_ainfo_t *ainfo, H5_index_t idx_type, done: /* Release resources */ if (bt2_name && H5B2_close(bt2_name) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_build_table() */ @@ -1808,18 +1808,18 @@ H5A__attr_sort_table(H5A_attr_table_t *atable, H5_index_t idx_type, H5_iter_orde /* Pick appropriate comparison routine */ if (idx_type == H5_INDEX_NAME) { if (order == H5_ITER_INC) - HDqsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_inc); + qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_inc); else if (order == H5_ITER_DEC) - HDqsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_dec); + qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_dec); else assert(order == H5_ITER_NATIVE); } /* end if */ else { assert(idx_type == H5_INDEX_CRT_ORDER); if (order == H5_ITER_INC) - HDqsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_inc); + qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_inc); else if (order == H5_ITER_DEC) - HDqsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_dec); + qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_dec); else assert(order == H5_ITER_NATIVE); } /* end else */ @@ -1855,7 
+1855,7 @@ H5A__attr_iterate_table(const H5A_attr_table_t *atable, hsize_t skip, hsize_t *l *last_attr = skip; /* Iterate over attribute messages */ - H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t) + H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t); for (; u < atable->nattrs && !ret_value; u++) { /* Check which type of callback to make */ switch (attr_op->op_type) { @@ -1993,7 +1993,7 @@ H5A__get_ainfo(H5F_t *f, H5O_t *oh, H5O_ainfo_t *ainfo) done: /* Release resources */ if (bt2_name && H5B2_close(bt2_name) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index"); FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5A__get_ainfo() */ @@ -2313,19 +2313,19 @@ H5A__attr_copy_file(const H5A_t *attr_src, H5F_t *file_dst, hbool_t *recompute_s done: if (buf_sid > 0 && H5I_dec_ref(buf_sid) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary dataspace ID") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary dataspace ID"); if (tid_src > 0) /* Don't decrement ID, we want to keep underlying datatype */ if (NULL == H5I_remove(tid_src)) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID"); if (tid_dst > 0) /* Don't decrement ID, we want to keep underlying datatype */ if (NULL == H5I_remove(tid_dst)) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID"); if (tid_mem > 0) /* Decrement the memory datatype ID, it's transient */ if (H5I_dec_ref(tid_mem) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "Can't decrement temporary datatype ID"); if (buf) buf = H5FL_BLK_FREE(attr_buf, buf); if (reclaim_buf) @@ -2335,7 +2335,7 @@ done: /* Release destination attribute information on failure */ if (!ret_value && attr_dst && H5A__close(attr_dst) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute") + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, NULL, "can't close attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__attr_copy_file() */ @@ -2468,18 +2468,18 @@ H5A__dense_post_copy_file_cb(const H5A_t *attr_src, void *_udata) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to reset attribute sharing") /* Set COPIED tag for destination object's metadata */ - H5_BEGIN_TAG(H5AC__COPIED_TAG); + H5_BEGIN_TAG(H5AC__COPIED_TAG) /* Insert attribute into dense storage */ if (H5A__dense_insert(udata->file, udata->ainfo, attr_dst) < 0) - HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTINSERT, H5_ITER_ERROR, "unable to add to dense storage") + HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTINSERT, H5_ITER_ERROR, "unable to add to dense storage"); /* Reset metadata tag */ H5_END_TAG done: if (attr_dst && H5A__close(attr_dst) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close destination attribute") + HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close destination attribute"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__dense_post_copy_file_cb() */ @@ -2566,7 +2566,7 @@ H5A__rename_by_name(H5G_loc_t loc, const char *obj_name, const char *old_attr_na done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location"); 
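[Editor's note -- illustrative sketch, not part of the patch.] The H5A__attr_sort_table hunk above also shows the second mechanical change in this diff: the HD-prefixed C standard library wrappers (HDqsort here, HDmemmove throughout the H5B and H5B2 hunks below) are replaced with direct calls. As far as I know these wrappers were one-to-one aliases along the lines of the hypothetical definitions below, so the substitution preserves behavior:

#include <stdlib.h>
#include <string.h>

/* Hypothetical old-style wrappers; the real definitions lived in an HDF5
 * private header and simply forwarded to the C standard library. */
#define HDqsort(base, n, sz, cmp) qsort(base, n, sz, cmp)
#define HDmemmove(dst, src, n)    memmove(dst, src, n)

static int
cmp_int(const void *a, const void *b)
{
    const int ia = *(const int *)a;
    const int ib = *(const int *)b;

    return (ia > ib) - (ia < ib);
}

int
main(void)
{
    int v[4] = {3, 1, 2, 0};

    HDqsort(v, 4, sizeof(int), cmp_int); /* before this patch */
    qsort(v, 4, sizeof(int), cmp_int);   /* after: the identical call */
    return v[0];                         /* 0 */
}

Dropping the aliases trades a historical portability shim for plainer, easier-to-grep code; no runtime behavior changes.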
FUNC_LEAVE_NOAPI(ret_value) } /* H5A__rename_by_name() */ @@ -2742,7 +2742,7 @@ H5A__delete_by_name(const H5G_loc_t *loc, const char *obj_name, const char *attr done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__delete_by_name() */ @@ -2785,7 +2785,7 @@ H5A__delete_by_idx(const H5G_loc_t *loc, const char *obj_name, H5_index_t idx_ty done: /* Release resources */ if (loc_found && H5G_loc_free(&obj_loc) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location") + HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't free location"); FUNC_LEAVE_NOAPI(ret_value) } /* H5A__delete_by_idx() */ diff --git a/src/H5Atest.c b/src/H5Atest.c index 92e199f..fdaf8bd 100644 --- a/src/H5Atest.c +++ b/src/H5Atest.c @@ -130,7 +130,7 @@ H5A__get_shared_rc_test(hid_t attr_id, hsize_t *ref_count) done: if (api_ctx_pushed && H5CX_pop(FALSE) < 0) - HDONE_ERROR(H5E_ATTR, H5E_CANTRESET, FAIL, "can't reset API context") + HDONE_ERROR(H5E_ATTR, H5E_CANTRESET, FAIL, "can't reset API context"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__get_shared_rc_test() */ diff --git a/src/H5B.c b/src/H5B.c index 16ae4f1..4c87f11 100644 --- a/src/H5B.c +++ b/src/H5B.c @@ -249,7 +249,7 @@ done: if (bt) /* Destroy B-tree node */ if (H5B__node_dest(bt) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -344,7 +344,7 @@ H5B_find(H5F_t *f, const H5B_class_t *type, haddr_t addr, hbool_t *found, void * done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_find() */ @@ -498,7 +498,7 @@ done: if (ret_value < 0) { if (split_bt_ud->bt && H5AC_unprotect(f, H5AC_BT, split_bt_ud->addr, split_bt_ud->bt, split_bt_ud->cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); split_bt_ud->bt = NULL; split_bt_ud->addr = HADDR_UNDEF; split_bt_ud->cache_flags = H5AC__NO_FLAGS_SET; @@ -570,7 +570,7 @@ H5B_insert(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) if (H5B_INS_NOOP == my_ins) { /* The root node did not split - just return */ assert(!split_bt_ud.bt); - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end if */ assert(H5B_INS_RIGHT == my_ins); assert(split_bt_ud.bt); @@ -643,11 +643,11 @@ done: if (bt_ud.bt) if (H5AC_unprotect(f, H5AC_BT, bt_ud.addr, bt_ud.bt, bt_ud.cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect old root") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect old root"); if (split_bt_ud.bt) if (H5AC_unprotect(f, H5AC_BT, split_bt_ud.addr, split_bt_ud.bt, split_bt_ud.cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect new child") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect new child"); #ifdef H5B_DEBUG if (ret_value >= 0) @@ -703,7 +703,7 @@ H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx, haddr_t child, H5 } /* end if */ else { /* Make room for the new key */ - HDmemmove(base 
+ shared->type->sizeof_nkey, base, (bt->nchildren - idx) * shared->type->sizeof_nkey); + memmove(base + shared->type->sizeof_nkey, base, (bt->nchildren - idx) * shared->type->sizeof_nkey); H5MM_memcpy(base, md_key, shared->type->sizeof_nkey); /* The MD_KEY is the left key of the new node */ @@ -711,7 +711,7 @@ H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx, haddr_t child, H5 idx++; /* Make room for the new child address */ - HDmemmove(bt->child + idx + 1, bt->child + idx, (bt->nchildren - idx) * sizeof(haddr_t)); + memmove(bt->child + idx + 1, bt->child + idx, (bt->nchildren - idx) * sizeof(haddr_t)); } /* end if */ bt->child[idx] = child; @@ -1066,12 +1066,12 @@ H5B__insert_helper(H5F_t *f, H5B_ins_ud_t *bt_ud, const H5B_class_t *type, uint8 done: if (child_bt_ud.bt) if (H5AC_unprotect(f, H5AC_BT, child_bt_ud.addr, child_bt_ud.bt, child_bt_ud.cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect child") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect child"); if (new_child_bt_ud.bt) if (H5AC_unprotect(f, H5AC_BT, new_child_bt_ud.addr, new_child_bt_ud.bt, new_child_bt_ud.cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect new child") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect new child"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_insert_helper() */ @@ -1132,7 +1132,7 @@ H5B__iterate_helper(H5F_t *f, const H5B_class_t *type, haddr_t addr, H5B_operato done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B__iterate_helper() */ @@ -1401,8 +1401,7 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u */ if (type->critical_key == H5B_LEFT) { /* Slide all keys down 1, update lt_key */ - HDmemmove(H5B_NKEY(bt, shared, 0), H5B_NKEY(bt, shared, 1), - bt->nchildren * type->sizeof_nkey); + memmove(H5B_NKEY(bt, shared, 0), H5B_NKEY(bt, shared, 1), bt->nchildren * type->sizeof_nkey); H5MM_memcpy(lt_key, H5B_NKEY(bt, shared, 0), type->sizeof_nkey); *lt_key_changed = TRUE; } @@ -1410,10 +1409,10 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u /* Slide all but the leftmost 2 keys down, leaving the leftmost * key intact (the right key of the leftmost child is * overwritten) */ - HDmemmove(H5B_NKEY(bt, shared, 1), H5B_NKEY(bt, shared, 2), - (bt->nchildren - 1) * type->sizeof_nkey); + memmove(H5B_NKEY(bt, shared, 1), H5B_NKEY(bt, shared, 2), + (bt->nchildren - 1) * type->sizeof_nkey); - HDmemmove(bt->child, bt->child + 1, (bt->nchildren - 1) * sizeof(haddr_t)); + memmove(bt->child, bt->child + 1, (bt->nchildren - 1) * sizeof(haddr_t)); bt->nchildren -= 1; bt_flags |= H5AC__DIRTIED_FLAG; @@ -1428,8 +1427,8 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u if (type->critical_key == H5B_LEFT) /* Slide the rightmost key down one, overwriting the left key of * the deleted (rightmost) child */ - HDmemmove(H5B_NKEY(bt, shared, bt->nchildren - 1), H5B_NKEY(bt, shared, bt->nchildren), - type->sizeof_nkey); + memmove(H5B_NKEY(bt, shared, bt->nchildren - 1), H5B_NKEY(bt, shared, bt->nchildren), + type->sizeof_nkey); else { /* Just update rt_key */ H5MM_memcpy(rt_key, H5B_NKEY(bt, shared, bt->nchildren - 1), 
type->sizeof_nkey); @@ -1449,13 +1448,13 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u * Return H5B_INS_NOOP. */ if (type->critical_key == H5B_LEFT) - HDmemmove(H5B_NKEY(bt, shared, idx), H5B_NKEY(bt, shared, idx + 1), - (bt->nchildren - idx) * type->sizeof_nkey); + memmove(H5B_NKEY(bt, shared, idx), H5B_NKEY(bt, shared, idx + 1), + (bt->nchildren - idx) * type->sizeof_nkey); else - HDmemmove(H5B_NKEY(bt, shared, idx + 1), H5B_NKEY(bt, shared, idx + 2), - (bt->nchildren - 1 - idx) * type->sizeof_nkey); + memmove(H5B_NKEY(bt, shared, idx + 1), H5B_NKEY(bt, shared, idx + 2), + (bt->nchildren - 1 - idx) * type->sizeof_nkey); - HDmemmove(bt->child + idx, bt->child + idx + 1, (bt->nchildren - 1 - idx) * sizeof(haddr_t)); + memmove(bt->child + idx, bt->child + idx + 1, (bt->nchildren - 1 - idx) * sizeof(haddr_t)); bt->nchildren -= 1; bt_flags |= H5AC__DIRTIED_FLAG; @@ -1499,7 +1498,7 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, bt_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B__remove_helper() */ @@ -1613,7 +1612,7 @@ H5B_delete(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node in cache") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node in cache"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_delete() */ @@ -1874,7 +1873,7 @@ H5B__get_info_helper(H5F_t *f, const H5B_class_t *type, haddr_t addr, const H5B_ done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B__get_info_helper() */ @@ -1972,7 +1971,7 @@ H5B_valid(H5F_t *f, const H5B_class_t *type, haddr_t addr) done: /* Release the node */ if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_valid() */ diff --git a/src/H5B2.c b/src/H5B2.c index ebbde9b..63ca28c 100644 --- a/src/H5B2.c +++ b/src/H5B2.c @@ -164,10 +164,10 @@ H5B2_create(H5F_t *f, const H5B2_create_t *cparam, void *ctx_udata) done: if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to release v2 B-tree header"); if (!ret_value && bt2) if (H5B2_close(bt2) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCLOSEOBJ, NULL, "unable to close v2 B-tree") + HDONE_ERROR(H5E_BTREE, H5E_CANTCLOSEOBJ, NULL, "unable to close v2 B-tree"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_create() */ @@ -226,10 +226,10 @@ H5B2_open(H5F_t *f, haddr_t addr, void *ctx_udata) done: if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to 
release v2 B-tree header"); if (!ret_value && bt2) if (H5B2_close(bt2) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCLOSEOBJ, NULL, "unable to close v2 B-tree") + HDONE_ERROR(H5E_BTREE, H5E_CANTCLOSEOBJ, NULL, "unable to close v2 B-tree"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_open() */ @@ -455,7 +455,7 @@ H5B2_find(H5B2_t *bt2, void *udata, hbool_t *found, H5B2_found_t op, void *op_da /* Check for empty tree */ if (curr_node_ptr.node_nrec == 0) { *found = FALSE; - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* Check record against min & max records in tree, to attempt to quickly @@ -466,14 +466,14 @@ H5B2_find(H5B2_t *bt2, void *udata, hbool_t *found, H5B2_found_t op, void *op_da HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if (cmp < 0) { *found = FALSE; /* Less than the least record--not found */ - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } else if (cmp == 0) { /* Record is found */ if (op && (op)(hdr->min_native_rec, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation") *found = TRUE; - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end if */ } /* end if */ if (hdr->max_native_rec != NULL) { @@ -481,14 +481,14 @@ H5B2_find(H5B2_t *bt2, void *udata, hbool_t *found, H5B2_found_t op, void *op_da HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if (cmp > 0) { *found = FALSE; /* Greater than the largest record--not found */ - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } else if (cmp == 0) { /* Record is found */ if (op && (op)(hdr->max_native_rec, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation") *found = TRUE; - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end if */ } /* end if */ @@ -580,7 +580,7 @@ H5B2_find(H5B2_t *bt2, void *udata, hbool_t *found, H5B2_found_t op, void *op_da /* Indicate record found */ *found = TRUE; - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end else */ /* Decrement depth we're at in B-tree */ @@ -616,7 +616,7 @@ H5B2_find(H5B2_t *bt2, void *udata, hbool_t *found, H5B2_found_t op, void *op_da /* Record not found */ *found = FALSE; - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end if */ else { /* Make callback for current record */ @@ -665,7 +665,7 @@ done: if (parent) { assert(ret_value < 0); if (parent != hdr && H5AC_unpin_entry(parent) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -861,7 +861,7 @@ done: if (parent) { assert(ret_value < 0); if (parent != hdr && H5AC_unpin_entry(parent) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1276,16 +1276,12 @@ H5B2_modify(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data) if (H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, curr_node_ptr.addr, leaf, H5AC__NO_FLAGS_SET) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") - /* Note: don't push error on stack, leave that to next higher level, - * since many times the B-tree is searched in order to determine - * if an object exists in the B-tree or not. 
-QAK - */ -#ifdef OLD_WAY - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "key not found in leaf node") -#else /* OLD_WAY */ - HGOTO_DONE(FAIL) -#endif /* OLD_WAY */ - } /* end if */ + /* Note: don't push error on stack, leave that to next higher level, + * since many times the B-tree is searched in order to determine + * if an object exists in the B-tree or not. + */ + HGOTO_DONE(FAIL); + } else { /* Make callback for current record */ if ((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data, &changed) < 0) { @@ -1336,7 +1332,7 @@ done: if (parent) { assert(ret_value < 0); if (parent != hdr && H5AC_unpin_entry(parent) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1495,7 +1491,7 @@ H5B2_delete(H5F_t *f, haddr_t addr, void *ctx_udata, H5B2_remove_t op, void *op_ done: /* Unprotect the header, if an error occurred */ if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release v2 B-tree header"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_delete() */ diff --git a/src/H5B2cache.c b/src/H5B2cache.c index d63baa1..2c749ff 100644 --- a/src/H5B2cache.c +++ b/src/H5B2cache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5B2cache.c - * Jan 31 2005 - * Quincey Koziol * - * Purpose: Implement v2 B-tree metadata cache methods. + * Purpose: Implement v2 B-tree metadata cache methods * *------------------------------------------------------------------------- */ @@ -30,10 +28,13 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5B2pkg.h" /* v2 B-trees */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5WBprivate.h" /* Wrapped Buffers */ +#include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata Cache */ +#include "H5B2pkg.h" /* B-Trees (Version 2) */ +#include "H5Eprivate.h" /* Error Handling */ +#include "H5Fprivate.h" /* Files */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MMprivate.h" /* Memory Management */ /****************/ /* Local Macros */ @@ -268,7 +269,7 @@ H5B2__cache_hdr_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void cparam.merge_percent = *image++; /* Root node pointer */ - H5_addr_decode(udata->f, (const uint8_t **)&image, &(hdr->root.addr)); + H5F_addr_decode(udata->f, (const uint8_t **)&image, &(hdr->root.addr)); UINT16DECODE(image, hdr->root.node_nrec); H5F_DECODE_LENGTH(udata->f, image, hdr->root.all_nrec); @@ -297,7 +298,7 @@ H5B2__cache_hdr_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void done: if (!ret_value && hdr) if (H5B2__hdr_free(hdr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, NULL, "can't release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, NULL, "can't release v2 B-tree header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__cache_hdr_deserialize() */ @@ -378,7 +379,7 @@ H5B2__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED le *image++ = (uint8_t)hdr->merge_percent; /* Root node pointer */ - H5_addr_encode(f, &image, hdr->root.addr); + H5F_addr_encode(f, &image, hdr->root.addr); UINT16ENCODE(image, hdr->root.node_nrec); H5F_ENCODE_LENGTH(f, image, hdr->root.all_nrec); @@ -663,12 +664,12 @@ H5B2__cache_int_deserialize(const void *_image, 
size_t H5_ATTR_UNUSED len, void int_node_ptr = internal->node_ptrs; for (u = 0; u < (unsigned)(internal->nrec + 1); u++) { /* Decode node pointer */ - H5_addr_decode(udata->f, (const uint8_t **)&image, &(int_node_ptr->addr)); + H5F_addr_decode(udata->f, (const uint8_t **)&image, &(int_node_ptr->addr)); UINT64DECODE_VAR(image, node_nrec, udata->hdr->max_nrec_size); H5_CHECKED_ASSIGN(int_node_ptr->node_nrec, uint16_t, node_nrec, int); if (udata->depth > 1) UINT64DECODE_VAR(image, int_node_ptr->all_nrec, - udata->hdr->node_info[udata->depth - 1].cum_max_nrec_size) + udata->hdr->node_info[udata->depth - 1].cum_max_nrec_size); else int_node_ptr->all_nrec = int_node_ptr->node_nrec; @@ -690,7 +691,7 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void done: if (!ret_value && internal) if (H5B2__internal_free(internal) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree internal node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree internal node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__cache_int_deserialize() */ @@ -779,7 +780,7 @@ H5B2__cache_int_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED le int_node_ptr = internal->node_ptrs; for (u = 0; u < (unsigned)(internal->nrec + 1); u++) { /* Encode node pointer */ - H5_addr_encode(f, &image, int_node_ptr->addr); + H5F_addr_encode(f, &image, int_node_ptr->addr); UINT64ENCODE_VAR(image, int_node_ptr->node_nrec, internal->hdr->max_nrec_size); if (internal->depth > 1) UINT64ENCODE_VAR(image, int_node_ptr->all_nrec, @@ -1063,7 +1064,7 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void done: if (!ret_value && leaf) if (H5B2__leaf_free(leaf) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree leaf node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__cache_leaf_deserialize() */ diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c index 0f498a8..f246e97 100644 --- a/src/H5B2dbg.c +++ b/src/H5B2dbg.c @@ -131,7 +131,7 @@ H5B2__hdr_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, done: if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release v2 B-tree header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__hdr_debug() */ @@ -183,7 +183,7 @@ H5B2__int_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, co */ H5_CHECK_OVERFLOW(depth, unsigned, uint16_t); node_ptr.addr = addr; - H5_CHECKED_ASSIGN(node_ptr.node_nrec, uint16_t, nrec, unsigned) + H5_CHECKED_ASSIGN(node_ptr.node_nrec, uint16_t, nrec, unsigned); if (NULL == (internal = H5B2__protect_internal(hdr, NULL, &node_ptr, (uint16_t)depth, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node") @@ -227,9 +227,9 @@ H5B2__int_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, co done: if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release v2 B-tree header"); if (internal && H5AC_unprotect(f, H5AC_BT2_INT, addr, internal, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree internal node") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree 
internal node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__int_debug() */ @@ -281,7 +281,7 @@ H5B2__leaf_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, c */ H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t); node_ptr.addr = addr; - H5_CHECKED_ASSIGN(node_ptr.node_nrec, uint16_t, nrec, unsigned) + H5_CHECKED_ASSIGN(node_ptr.node_nrec, uint16_t, nrec, unsigned); if (NULL == (leaf = H5B2__protect_leaf(hdr, NULL, &node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") @@ -312,9 +312,9 @@ H5B2__leaf_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, c done: if (hdr && H5B2__hdr_unprotect(hdr, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree header"); if (leaf && H5AC_unprotect(f, H5AC_BT2_LEAF, addr, leaf, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree leaf node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__leaf_debug() */ diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c index 671cdc3..49d5593 100644 --- a/src/H5B2hdr.c +++ b/src/H5B2hdr.c @@ -137,7 +137,7 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, ui /* Initialize leaf node info */ sz_max_nrec = H5B2_NUM_LEAF_REC(hdr->node_size, hdr->rrec_size); - H5_CHECKED_ASSIGN(hdr->node_info[0].max_nrec, unsigned, sz_max_nrec, size_t) + H5_CHECKED_ASSIGN(hdr->node_info[0].max_nrec, unsigned, sz_max_nrec, size_t); hdr->node_info[0].split_nrec = (hdr->node_info[0].max_nrec * hdr->split_percent) / 100; hdr->node_info[0].merge_nrec = (hdr->node_info[0].max_nrec * hdr->merge_percent) / 100; hdr->node_info[0].cum_max_nrec = hdr->node_info[0].max_nrec; @@ -160,14 +160,14 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, ui /* Compute size to store # of records in each node */ /* (uses leaf # of records because its the largest) */ u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[0].max_nrec); - H5_CHECKED_ASSIGN(hdr->max_nrec_size, uint8_t, u_max_nrec_size, unsigned) + H5_CHECKED_ASSIGN(hdr->max_nrec_size, uint8_t, u_max_nrec_size, unsigned); assert(hdr->max_nrec_size <= H5B2_SIZEOF_RECORDS_PER_NODE); /* Initialize internal node info */ if (depth > 0) { for (u = 1; u < (unsigned)(depth + 1); u++) { sz_max_nrec = H5B2_NUM_INT_REC(hdr, u); - H5_CHECKED_ASSIGN(hdr->node_info[u].max_nrec, unsigned, sz_max_nrec, size_t) + H5_CHECKED_ASSIGN(hdr->node_info[u].max_nrec, unsigned, sz_max_nrec, size_t); assert(hdr->node_info[u].max_nrec <= hdr->node_info[u - 1].max_nrec); hdr->node_info[u].split_nrec = (hdr->node_info[u].max_nrec * hdr->split_percent) / 100; @@ -177,7 +177,7 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, ui ((hdr->node_info[u].max_nrec + 1) * hdr->node_info[u - 1].cum_max_nrec) + hdr->node_info[u].max_nrec; u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[u].cum_max_nrec); - H5_CHECKED_ASSIGN(hdr->node_info[u].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned) + H5_CHECKED_ASSIGN(hdr->node_info[u].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned); if (NULL == (hdr->node_info[u].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[u].max_nrec))) @@ -204,7 +204,7 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, ui done: if 
(ret_value < 0) if (H5B2__hdr_free(hdr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free shared v2 B-tree info") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free shared v2 B-tree info"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__hdr_init() */ @@ -312,16 +312,16 @@ done: if (inserted) if (H5AC_remove_entry(hdr) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, HADDR_UNDEF, - "unable to remove v2 B-tree header from cache") + "unable to remove v2 B-tree header from cache"); /* Release header's disk space */ if (H5_addr_defined(hdr->addr) && H5MF_xfree(f, H5FD_MEM_BTREE, hdr->addr, (hsize_t)hdr->hdr_size) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, HADDR_UNDEF, "unable to free v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, HADDR_UNDEF, "unable to free v2 B-tree header"); /* Destroy header */ if (H5B2__hdr_free(hdr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, HADDR_UNDEF, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, HADDR_UNDEF, "unable to release v2 B-tree header"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -520,7 +520,7 @@ done: /* Release the header, if it was protected */ if (hdr && H5AC_unprotect(hdr->f, H5AC_BT2_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, - "unable to unprotect v2 B-tree header, address = %llu", (unsigned long long)hdr_addr) + "unable to unprotect v2 B-tree header, address = %llu", (unsigned long long)hdr_addr); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -674,7 +674,7 @@ H5B2__hdr_delete(H5B2_hdr_t *hdr) done: /* Unprotect the header with appropriate flags */ if (H5B2__hdr_unprotect(hdr, cache_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release v2 B-tree header") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release v2 B-tree header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__hdr_delete() */ diff --git a/src/H5B2int.c b/src/H5B2int.c index 766157b..a514078 100644 --- a/src/H5B2int.c +++ b/src/H5B2int.c @@ -151,10 +151,10 @@ H5B2__split1(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, /* Slide records in parent node up one space, to make room for promoted record */ if (idx < internal->nrec) { - HDmemmove(H5B2_INT_NREC(internal, hdr, idx + 1), H5B2_INT_NREC(internal, hdr, idx), - hdr->cls->nrec_size * (internal->nrec - idx)); - HDmemmove(&(internal->node_ptrs[idx + 2]), &(internal->node_ptrs[idx + 1]), - sizeof(H5B2_node_ptr_t) * (internal->nrec - idx)); + memmove(H5B2_INT_NREC(internal, hdr, idx + 1), H5B2_INT_NREC(internal, hdr, idx), + hdr->cls->nrec_size * (internal->nrec - idx)); + memmove(&(internal->node_ptrs[idx + 2]), &(internal->node_ptrs[idx + 1]), + sizeof(H5B2_node_ptr_t) * (internal->nrec - idx)); } /* end if */ /* Check for the kind of B-tree node to split */ @@ -309,9 +309,9 @@ H5B2__split1(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, done: /* Release child nodes (marked as dirty) */ if (left_child && H5AC_unprotect(hdr->f, child_class, left_addr, left_child, left_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node"); if (right_child && H5AC_unprotect(hdr->f, child_class, right_addr, right_child, right_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node"); 
FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__split1() */ @@ -351,14 +351,14 @@ H5B2__split_root(H5B2_hdr_t *hdr) /* Update node info for new depth of tree */ sz_max_nrec = H5B2_NUM_INT_REC(hdr, hdr->depth); - H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].max_nrec, unsigned, sz_max_nrec, size_t) + H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].max_nrec, unsigned, sz_max_nrec, size_t); hdr->node_info[hdr->depth].split_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->split_percent) / 100; hdr->node_info[hdr->depth].merge_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->merge_percent) / 100; hdr->node_info[hdr->depth].cum_max_nrec = ((hdr->node_info[hdr->depth].max_nrec + 1) * hdr->node_info[hdr->depth - 1].cum_max_nrec) + hdr->node_info[hdr->depth].max_nrec; u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[hdr->depth].cum_max_nrec); - H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned) + H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned); if (NULL == (hdr->node_info[hdr->depth].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[hdr->depth].max_nrec))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create node native key block factory") @@ -390,7 +390,7 @@ H5B2__split_root(H5B2_hdr_t *hdr) done: /* Release new root node (marked as dirty) */ if (new_root && H5AC_unprotect(hdr->f, H5AC_BT2_INT, hdr->root.addr, new_root, new_root_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree internal node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree internal node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__split_root() */ @@ -521,8 +521,8 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, hdr->cls->nrec_size); /* Slide records in right node down */ - HDmemmove(H5B2_NAT_NREC(right_native, hdr, 0), H5B2_NAT_NREC(right_native, hdr, move_nrec), - hdr->cls->nrec_size * new_right_nrec); + memmove(H5B2_NAT_NREC(right_native, hdr, 0), H5B2_NAT_NREC(right_native, hdr, move_nrec), + hdr->cls->nrec_size * new_right_nrec); /* Handle node pointers, if we have an internal node */ if (depth > 1) { @@ -532,7 +532,7 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, /* Count the number of records being moved */ for (u = 0; u < move_nrec; u++) moved_nrec += right_node_ptrs[u].all_nrec; - H5_CHECKED_ASSIGN(left_moved_nrec, hssize_t, moved_nrec, hsize_t) + H5_CHECKED_ASSIGN(left_moved_nrec, hssize_t, moved_nrec, hsize_t); right_moved_nrec -= (hssize_t)moved_nrec; /* Copy node pointers from right node to left */ @@ -540,8 +540,8 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, sizeof(H5B2_node_ptr_t) * move_nrec); /* Slide node pointers in right node down */ - HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[move_nrec]), - sizeof(H5B2_node_ptr_t) * (new_right_nrec + (unsigned)1)); + memmove(&(right_node_ptrs[0]), &(right_node_ptrs[move_nrec]), + sizeof(H5B2_node_ptr_t) * (new_right_nrec + (unsigned)1)); } /* end if */ /* Update flush dependencies for grandchildren, if using SWMR */ @@ -571,8 +571,8 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, assert(*left_nrec > *right_nrec); /* Slide records in right node up */ - HDmemmove(H5B2_NAT_NREC(right_native, hdr, move_nrec), H5B2_NAT_NREC(right_native, hdr, 0), - hdr->cls->nrec_size * (*right_nrec)); + memmove(H5B2_NAT_NREC(right_native, hdr, 
move_nrec), H5B2_NAT_NREC(right_native, hdr, 0), + hdr->cls->nrec_size * (*right_nrec)); /* Copy record from parent node down into right child */ H5MM_memcpy(H5B2_NAT_NREC(right_native, hdr, (move_nrec - 1)), H5B2_INT_NREC(internal, hdr, idx), @@ -594,8 +594,8 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, unsigned u; /* Local index variable */ /* Slide node pointers in right node up */ - HDmemmove(&(right_node_ptrs[move_nrec]), &(right_node_ptrs[0]), - sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); + memmove(&(right_node_ptrs[move_nrec]), &(right_node_ptrs[0]), + sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); /* Copy node pointers from left node to right */ H5MM_memcpy(&(right_node_ptrs[0]), &(left_node_ptrs[new_left_nrec + 1]), @@ -605,7 +605,7 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, for (u = 0; u < move_nrec; u++) moved_nrec += right_node_ptrs[u].all_nrec; left_moved_nrec -= (hssize_t)moved_nrec; - H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t) + H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t); } /* end if */ /* Update flush dependencies for grandchildren, if using SWMR */ @@ -656,9 +656,9 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, done: /* Release child nodes (marked as dirty) */ if (left_child && H5AC_unprotect(hdr->f, child_class, left_addr, left_child, left_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); if (right_child && H5AC_unprotect(hdr->f, child_class, right_addr, right_child, right_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__redistribute2() */ @@ -813,9 +813,9 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, moved_middle_nrec++; /* Slide records in middle node down */ - HDmemmove(H5B2_NAT_NREC(middle_native, hdr, 0), - H5B2_NAT_NREC(middle_native, hdr, moved_middle_nrec), - hdr->cls->nrec_size * (size_t)(*middle_nrec - moved_middle_nrec)); + memmove(H5B2_NAT_NREC(middle_native, hdr, 0), + H5B2_NAT_NREC(middle_native, hdr, moved_middle_nrec), + hdr->cls->nrec_size * (size_t)(*middle_nrec - moved_middle_nrec)); /* Move node pointers also if this is an internal node */ if (depth > 1) { @@ -835,8 +835,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, middle_moved_nrec -= (hssize_t)(moved_nrec + move_nptrs); /* Slide the node pointers in middle node down */ - HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[move_nptrs]), - sizeof(H5B2_node_ptr_t) * ((*middle_nrec - move_nptrs) + 1)); + memmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[move_nptrs]), + sizeof(H5B2_node_ptr_t) * ((*middle_nrec - move_nptrs) + 1)); } /* end if */ /* Update flush dependencies for grandchildren, if using SWMR */ @@ -860,8 +860,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, (unsigned)(new_right_nrec - *right_nrec); /* Number of records to move out of right node */ /* Slide records in right node up */ - HDmemmove(H5B2_NAT_NREC(right_native, hdr, right_nrec_move), H5B2_NAT_NREC(right_native, hdr, 0), - hdr->cls->nrec_size * (*right_nrec)); + memmove(H5B2_NAT_NREC(right_native, hdr, 
right_nrec_move), H5B2_NAT_NREC(right_native, hdr, 0), + hdr->cls->nrec_size * (*right_nrec)); /* Move right parent record down to right node */ H5MM_memcpy(H5B2_NAT_NREC(right_native, hdr, right_nrec_move - 1), @@ -884,8 +884,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, unsigned u; /* Local index variable */ /* Slide the node pointers in right node up */ - HDmemmove(&(right_node_ptrs[right_nrec_move]), &(right_node_ptrs[0]), - sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); + memmove(&(right_node_ptrs[right_nrec_move]), &(right_node_ptrs[0]), + sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); /* Move middle node pointers into right node */ H5MM_memcpy(&(right_node_ptrs[0]), @@ -920,8 +920,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, (unsigned)(*left_nrec - new_left_nrec); /* Number of records to move out of left node */ /* Slide middle records up */ - HDmemmove(H5B2_NAT_NREC(middle_native, hdr, left_nrec_move), H5B2_NAT_NREC(middle_native, hdr, 0), - hdr->cls->nrec_size * curr_middle_nrec); + memmove(H5B2_NAT_NREC(middle_native, hdr, left_nrec_move), H5B2_NAT_NREC(middle_native, hdr, 0), + hdr->cls->nrec_size * curr_middle_nrec); /* Move left parent record down to middle node */ H5MM_memcpy(H5B2_NAT_NREC(middle_native, hdr, left_nrec_move - 1), @@ -929,9 +929,9 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, /* Move left records to middle node */ if (left_nrec_move > 1) - HDmemmove(H5B2_NAT_NREC(middle_native, hdr, 0), - H5B2_NAT_NREC(left_native, hdr, new_left_nrec + 1), - hdr->cls->nrec_size * (left_nrec_move - 1)); + memmove(H5B2_NAT_NREC(middle_native, hdr, 0), + H5B2_NAT_NREC(left_native, hdr, new_left_nrec + 1), + hdr->cls->nrec_size * (left_nrec_move - 1)); /* Move left parent record up from left node */ H5MM_memcpy(H5B2_INT_NREC(internal, hdr, idx - 1), H5B2_NAT_NREC(left_native, hdr, new_left_nrec), @@ -943,8 +943,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, unsigned u; /* Local index variable */ /* Slide the node pointers in middle node up */ - HDmemmove(&(middle_node_ptrs[left_nrec_move]), &(middle_node_ptrs[0]), - sizeof(H5B2_node_ptr_t) * (size_t)(curr_middle_nrec + 1)); + memmove(&(middle_node_ptrs[left_nrec_move]), &(middle_node_ptrs[0]), + sizeof(H5B2_node_ptr_t) * (size_t)(curr_middle_nrec + 1)); /* Move left node pointers into middle node */ H5MM_memcpy(&(middle_node_ptrs[0]), &(left_node_ptrs[new_left_nrec + 1]), @@ -981,16 +981,16 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, H5B2_INT_NREC(internal, hdr, idx), hdr->cls->nrec_size); /* Move right records to middle node */ - HDmemmove(H5B2_NAT_NREC(middle_native, hdr, (curr_middle_nrec + 1)), - H5B2_NAT_NREC(right_native, hdr, 0), hdr->cls->nrec_size * (right_nrec_move - 1)); + memmove(H5B2_NAT_NREC(middle_native, hdr, (curr_middle_nrec + 1)), + H5B2_NAT_NREC(right_native, hdr, 0), hdr->cls->nrec_size * (right_nrec_move - 1)); /* Move right parent record up from right node */ H5MM_memcpy(H5B2_INT_NREC(internal, hdr, idx), H5B2_NAT_NREC(right_native, hdr, right_nrec_move - 1), hdr->cls->nrec_size); /* Slide right records down */ - HDmemmove(H5B2_NAT_NREC(right_native, hdr, 0), H5B2_NAT_NREC(right_native, hdr, right_nrec_move), - hdr->cls->nrec_size * new_right_nrec); + memmove(H5B2_NAT_NREC(right_native, hdr, 0), H5B2_NAT_NREC(right_native, hdr, right_nrec_move), + hdr->cls->nrec_size * new_right_nrec); /* Move node 
pointers also if this is an internal node */ if (depth > 1) { @@ -1008,8 +1008,8 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, middle_moved_nrec += (hssize_t)(moved_nrec + right_nrec_move); /* Slide the node pointers in right node down */ - HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[right_nrec_move]), - sizeof(H5B2_node_ptr_t) * (size_t)(new_right_nrec + 1)); + memmove(&(right_node_ptrs[0]), &(right_node_ptrs[right_nrec_move]), + sizeof(H5B2_node_ptr_t) * (size_t)(new_right_nrec + 1)); } /* end if */ /* Update flush dependencies for grandchildren, if using SWMR */ @@ -1075,12 +1075,12 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, done: /* Unlock child nodes (marked as dirty) */ if (left_child && H5AC_unprotect(hdr->f, child_class, left_addr, left_child, left_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); if (middle_child && H5AC_unprotect(hdr->f, child_class, middle_addr, middle_child, middle_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); if (right_child && H5AC_unprotect(hdr->f, child_class, right_addr, right_child, right_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__redistribute3() */ @@ -1218,10 +1218,10 @@ H5B2__merge2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, /* Slide records in parent node down, to eliminate demoted record */ if ((idx + 1) < internal->nrec) { - HDmemmove(H5B2_INT_NREC(internal, hdr, idx), H5B2_INT_NREC(internal, hdr, idx + 1), - hdr->cls->nrec_size * (internal->nrec - (idx + 1))); - HDmemmove(&(internal->node_ptrs[idx + 1]), &(internal->node_ptrs[idx + 2]), - sizeof(H5B2_node_ptr_t) * (internal->nrec - (idx + 1))); + memmove(H5B2_INT_NREC(internal, hdr, idx), H5B2_INT_NREC(internal, hdr, idx + 1), + hdr->cls->nrec_size * (internal->nrec - (idx + 1))); + memmove(&(internal->node_ptrs[idx + 1]), &(internal->node_ptrs[idx + 2]), + sizeof(H5B2_node_ptr_t) * (internal->nrec - (idx + 1))); } /* end if */ /* Update # of records in parent node */ @@ -1248,11 +1248,11 @@ H5B2__merge2(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, done: /* Unlock left node (marked as dirty) */ if (left_child && H5AC_unprotect(hdr->f, child_class, left_addr, left_child, left_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); /* Delete right node & remove from cache (marked as dirty) */ if (right_child && H5AC_unprotect(hdr->f, child_class, right_addr, right_child, right_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__merge2() */ @@ -1396,8 +1396,8 @@ H5B2__merge3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, H5B2_NAT_NREC(middle_native, hdr, (middle_nrec_move - 1)), hdr->cls->nrec_size); /* Slide records in middle node down */ - 
HDmemmove(H5B2_NAT_NREC(middle_native, hdr, 0), H5B2_NAT_NREC(middle_native, hdr, middle_nrec_move), - hdr->cls->nrec_size * (*middle_nrec - middle_nrec_move)); + memmove(H5B2_NAT_NREC(middle_native, hdr, 0), H5B2_NAT_NREC(middle_native, hdr, middle_nrec_move), + hdr->cls->nrec_size * (*middle_nrec - middle_nrec_move)); /* Move node pointers also if this is an internal node */ if (depth > 1) { @@ -1412,8 +1412,8 @@ H5B2__merge3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, middle_moved_nrec += middle_node_ptrs[u].all_nrec; /* Slide the node pointers in middle node down */ - HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[middle_nrec_move]), - sizeof(H5B2_node_ptr_t) * (size_t)((unsigned)(*middle_nrec + 1) - middle_nrec_move)); + memmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[middle_nrec_move]), + sizeof(H5B2_node_ptr_t) * (size_t)((unsigned)(*middle_nrec + 1) - middle_nrec_move)); } /* end if */ /* Update flush dependencies for grandchildren, if using SWMR */ @@ -1475,10 +1475,10 @@ H5B2__merge3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, /* Slide records in parent node down, to eliminate demoted record */ if ((idx + 1) < internal->nrec) { - HDmemmove(H5B2_INT_NREC(internal, hdr, idx), H5B2_INT_NREC(internal, hdr, idx + 1), - hdr->cls->nrec_size * (internal->nrec - (idx + 1))); - HDmemmove(&(internal->node_ptrs[idx + 1]), &(internal->node_ptrs[idx + 2]), - sizeof(H5B2_node_ptr_t) * (internal->nrec - (idx + 1))); + memmove(H5B2_INT_NREC(internal, hdr, idx), H5B2_INT_NREC(internal, hdr, idx + 1), + hdr->cls->nrec_size * (internal->nrec - (idx + 1))); + memmove(&(internal->node_ptrs[idx + 1]), &(internal->node_ptrs[idx + 2]), + sizeof(H5B2_node_ptr_t) * (internal->nrec - (idx + 1))); } /* end if */ /* Update # of records in parent node */ @@ -1510,14 +1510,14 @@ H5B2__merge3(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, done: /* Unlock left & middle nodes (marked as dirty) */ if (left_child && H5AC_unprotect(hdr->f, child_class, left_addr, left_child, left_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); if (middle_child && H5AC_unprotect(hdr->f, child_class, middle_addr, middle_child, middle_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); /* Delete right node & remove from cache (marked as dirty) */ if (right_child && H5AC_unprotect(hdr->f, child_class, right_addr, right_child, right_child_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__merge3() */ @@ -1683,7 +1683,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node, done: /* Unpin the node if it was pinned */ if (node_pinned && H5AC_unpin_entry(node) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "can't unpin node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "can't unpin node"); /* Release the node pointers & native records, if they were copied */ if (node_ptrs) @@ -1770,7 +1770,7 @@ done: if (node && H5AC_unprotect( hdr->f, curr_node_class, curr_node->addr, node, (unsigned)(H5AC__DELETED_FLAG | (hdr->swmr_write ? 
0 : H5AC__FREE_FILE_SPACE_FLAG))) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__delete_node() */ @@ -1823,7 +1823,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, uint16_t depth, H5B2_node_ptr_t *curr_node, voi done: if (internal && H5AC_unprotect(hdr->f, H5AC_BT2_INT, curr_node->addr, internal, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__node_size() */ @@ -1946,7 +1946,7 @@ done: /* Unprotect the child */ if (child) if (H5AC_unprotect(hdr->f, child_class, node_ptr->addr, child, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__update_flush_depend() */ diff --git a/src/H5B2internal.c b/src/H5B2internal.c index e3f1cf9..e8405de 100644 --- a/src/H5B2internal.c +++ b/src/H5B2internal.c @@ -145,17 +145,17 @@ done: if (inserted) if (H5AC_remove_entry(internal) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, - "unable to remove v2 B-tree internal node from cache") + "unable to remove v2 B-tree internal node from cache"); /* Release internal node's disk space */ if (H5_addr_defined(node_ptr->addr) && H5MF_xfree(hdr->f, H5FD_MEM_BTREE, node_ptr->addr, (hsize_t)hdr->node_size) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, - "unable to release file space for v2 B-tree internal node") + "unable to release file space for v2 B-tree internal node"); /* Destroy internal node */ if (H5B2__internal_free(internal) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node"); } /* end if */ } /* end if */ @@ -229,7 +229,7 @@ done: if (H5AC_proxy_entry_remove_child(internal->top_proxy, internal) < 0) HDONE_ERROR( H5E_BTREE, H5E_CANTUNDEPEND, NULL, - "unable to destroy flush dependency between internal node and v2 B-tree 'top' proxy") + "unable to destroy flush dependency between internal node and v2 B-tree 'top' proxy"); internal->top_proxy = NULL; } /* end if */ @@ -237,7 +237,7 @@ done: if (H5AC_unprotect(hdr->f, H5AC_BT2_INT, node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to unprotect v2 B-tree internal node, address = %llu", - (unsigned long long)node_ptr->addr) + (unsigned long long)node_ptr->addr); } /* end if */ } /* end if */ @@ -325,7 +325,7 @@ done: /* Release the B-tree internal node */ if (internal && H5AC_unprotect(hdr->f, H5AC_BT2_INT, curr_node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__neighbor_internal() */ @@ -483,11 +483,11 @@ done: /* Shadow the node if doing SWMR writes */ if (hdr->swmr_write && (internal_flags & H5AC__DIRTIED_FLAG)) if (H5B2__shadow_internal(internal, curr_node_ptr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node"); /* 
Unprotect node */ if (H5AC_unprotect(hdr->f, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -686,7 +686,7 @@ done: if (hdr->swmr_write && (internal_flags & H5AC__DIRTIED_FLAG)) { /* Attempt to shadow the node if doing SWMR writes */ if (H5B2__shadow_internal(internal, curr_node_ptr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node"); /* Change the state to "shadowed" if only modified currently */ /* (Triggers parent to be marked dirty) */ @@ -696,7 +696,7 @@ done: /* Unprotect node */ if (H5AC_unprotect(hdr->f, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1002,7 +1002,7 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hbool_t *depth_decreased, void *swap_loc, done: /* Release the B-tree internal node */ if (internal && H5AC_unprotect(hdr->f, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__remove_internal() */ @@ -1294,7 +1294,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hbool_t *depth_decreased, void *sw done: /* Release the B-tree internal node */ if (internal && H5AC_unprotect(hdr->f, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__remove_internal_by_idx() */ diff --git a/src/H5B2leaf.c b/src/H5B2leaf.c index 4f1e566..062528b 100644 --- a/src/H5B2leaf.c +++ b/src/H5B2leaf.c @@ -134,17 +134,17 @@ done: if (inserted) if (H5AC_remove_entry(leaf) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, - "unable to remove v2 B-tree leaf node from cache") + "unable to remove v2 B-tree leaf node from cache"); /* Release leaf node's disk space */ if (H5_addr_defined(node_ptr->addr) && H5MF_xfree(hdr->f, H5FD_MEM_BTREE, node_ptr->addr, (hsize_t)hdr->node_size) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, - "unable to release file space for v2 B-tree leaf node") + "unable to release file space for v2 B-tree leaf node"); /* Destroy leaf node */ if (H5B2__leaf_free(leaf) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree leaf node"); } /* end if */ } /* end if */ @@ -213,7 +213,7 @@ done: if (H5AC_proxy_entry_remove_child(leaf->top_proxy, leaf) < 0) HDONE_ERROR( H5E_BTREE, H5E_CANTUNDEPEND, NULL, - "unable to destroy flush dependency between leaf node and v2 B-tree 'top' proxy") + "unable to destroy flush dependency between leaf node and v2 B-tree 'top' proxy"); leaf->top_proxy = NULL; } /* end if */ @@ -221,7 +221,7 @@ done: if (H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 
0) HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to unprotect v2 B-tree leaf node, address = %llu", - (unsigned long long)node_ptr->addr) + (unsigned long long)node_ptr->addr); } /* end if */ } /* end if */ @@ -304,7 +304,7 @@ H5B2__neighbor_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, void *neigh done: /* Release the B-tree leaf node */ if (leaf && H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__neighbor_leaf() */ @@ -360,8 +360,8 @@ H5B2__insert_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_ /* Make room for new record */ if (idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), - hdr->cls->nrec_size * (leaf->nrec - idx)); + memmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), + hdr->cls->nrec_size * (leaf->nrec - idx)); } /* end else */ /* Make callback to store record in native form */ @@ -407,11 +407,11 @@ done: /* Shadow the node if doing SWMR writes */ if (hdr->swmr_write && (leaf_flags & H5AC__DIRTIED_FLAG)) if (H5B2__shadow_leaf(leaf, curr_node_ptr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node"); /* Unprotect leaf node */ if (H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -470,7 +470,7 @@ H5B2__update_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_update_s *status = H5B2_UPDATE_INSERT_CHILD_FULL; /* Let calling routine handle insertion */ - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end if */ /* Adjust index to leave room for record to insert */ @@ -479,8 +479,8 @@ H5B2__update_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_update_s /* Make room for new record */ if (idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), - hdr->cls->nrec_size * (leaf->nrec - idx)); + memmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), + hdr->cls->nrec_size * (leaf->nrec - idx)); } /* end if */ } /* end else */ @@ -555,7 +555,7 @@ done: if (hdr->swmr_write && (leaf_flags & H5AC__DIRTIED_FLAG)) { /* Attempt to shadow the node if doing SWMR writes */ if (H5B2__shadow_leaf(leaf, curr_node_ptr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node"); /* Change the state to "shadowed" if only modified currently */ /* (Triggers parent to be marked dirty) */ @@ -565,7 +565,7 @@ done: /* Unprotect leaf node */ if (H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -654,7 +654,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, uint16_t depth, H5B2_internal_t *internal, unsi done: /* Unlock child node */ if (child && 
H5AC_unprotect(hdr->f, child_class, child_addr, child, H5AC__DIRTIED_FLAG) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__swap_leaf() */ @@ -801,8 +801,8 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_ /* Pack record out of leaf */ if (idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), - hdr->cls->nrec_size * (leaf->nrec - idx)); + memmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), + hdr->cls->nrec_size * (leaf->nrec - idx)); /* Mark leaf node as dirty also */ leaf_flags |= H5AC__DIRTIED_FLAG; @@ -823,7 +823,7 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_ done: /* Release the B-tree leaf node */ if (leaf && H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__remove_leaf() */ @@ -899,8 +899,8 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_n /* Pack record out of leaf */ if (idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), - hdr->cls->nrec_size * (leaf->nrec - idx)); + memmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), + hdr->cls->nrec_size * (leaf->nrec - idx)); /* Mark leaf node as dirty also */ leaf_flags |= H5AC__DIRTIED_FLAG; @@ -921,7 +921,7 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_n done: /* Release the B-tree leaf node */ if (leaf && H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2__remove_leaf_by_idx() */ diff --git a/src/H5B2test.c b/src/H5B2test.c index e1558cc..face2e0 100644 --- a/src/H5B2test.c +++ b/src/H5B2test.c @@ -25,9 +25,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5B2pkg.h" /* v2 B-trees */ -#include "H5Eprivate.h" /* Error handling */ +#include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata Cache */ +#include "H5B2pkg.h" /* B-Trees (Version 2) */ +#include "H5Eprivate.h" /* Error Handling */ +#include "H5Fprivate.h" /* Files */ +#include "H5FLprivate.h" /* Free Lists */ /****************/ /* Local Macros */ @@ -234,7 +237,7 @@ H5B2__test_encode(uint8_t *raw, const void *nrecord, void *_ctx) /* Sanity check */ assert(ctx); - H5F_ENCODE_LENGTH_LEN(raw, *(const hsize_t *)nrecord, ctx->sizeof_size); + H5_ENCODE_LENGTH_LEN(raw, *(const hsize_t *)nrecord, ctx->sizeof_size); FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B2__test_encode() */ @@ -259,7 +262,7 @@ H5B2__test_decode(const uint8_t *raw, void *nrecord, void *_ctx) /* Sanity check */ assert(ctx); - H5F_DECODE_LENGTH_LEN(raw, *(hsize_t *)nrecord, ctx->sizeof_size); + H5_DECODE_LENGTH_LEN(raw, *(hsize_t *)nrecord, ctx->sizeof_size); FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B2__test_decode() */ @@ -347,8 +350,8 @@ H5B2__test2_encode(uint8_t *raw, const void *nrecord, void *_ctx) /* Sanity check */ assert(ctx); 
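For context: the H5F_ENCODE_LENGTH_LEN / H5F_DECODE_LENGTH_LEN macros used by these v2 B-tree test callbacks become H5_ENCODE_LENGTH_LEN / H5_DECODE_LENGTH_LEN below, matching their move into the new src/H5encode.h (see the diffstat). They write or read an hsize_t value in exactly ctx->sizeof_size bytes. A minimal stand-alone sketch of a codec of that general shape -- assuming little-endian byte order and using hypothetical names, not the H5encode.h definitions:

    #include <stddef.h> /* size_t */
    #include <stdint.h> /* uint8_t, uint64_t */

    /* Write 'v' as exactly 'len' little-endian bytes, advancing *pp */
    static void demo_encode_len(uint8_t **pp, uint64_t v, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            *(*pp)++ = (uint8_t)(v & 0xffU);
            v >>= 8;
        }
    }

    /* Read exactly 'len' little-endian bytes back into a value, advancing *pp */
    static uint64_t demo_decode_len(const uint8_t **pp, size_t len)
    {
        uint64_t v = 0;
        for (size_t i = 0; i < len; i++)
            v |= (uint64_t)(*(*pp)++) << (8 * i);
        return v;
    }
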
- H5F_ENCODE_LENGTH_LEN(raw, ((const H5B2_test_rec_t *)nrecord)->key, ctx->sizeof_size); - H5F_ENCODE_LENGTH_LEN(raw, ((const H5B2_test_rec_t *)nrecord)->val, ctx->sizeof_size); + H5_ENCODE_LENGTH_LEN(raw, ((const H5B2_test_rec_t *)nrecord)->key, ctx->sizeof_size); + H5_ENCODE_LENGTH_LEN(raw, ((const H5B2_test_rec_t *)nrecord)->val, ctx->sizeof_size); FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B2__test2_encode() */ @@ -373,8 +376,8 @@ H5B2__test2_decode(const uint8_t *raw, void *nrecord, void *_ctx) /* Sanity check */ assert(ctx); - H5F_DECODE_LENGTH_LEN(raw, ((H5B2_test_rec_t *)nrecord)->key, ctx->sizeof_size); - H5F_DECODE_LENGTH_LEN(raw, ((H5B2_test_rec_t *)nrecord)->val, ctx->sizeof_size); + H5_DECODE_LENGTH_LEN(raw, ((H5B2_test_rec_t *)nrecord)->key, ctx->sizeof_size); + H5_DECODE_LENGTH_LEN(raw, ((H5B2_test_rec_t *)nrecord)->val, ctx->sizeof_size); FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B2__test2_decode() */ @@ -523,7 +526,7 @@ H5B2__get_node_info_test(H5B2_t *bt2, void *udata, H5B2_node_info_test_t *ninfo) ninfo->nrec = curr_node_ptr.node_nrec; /* Indicate success */ - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); } /* end else */ /* Decrement depth we're at in B-tree */ @@ -565,7 +568,7 @@ done: if (parent) { assert(ret_value < 0); if (parent != hdr && H5AC_unpin_entry(parent) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Bcache.c b/src/H5Bcache.c index 939d8aa..8f0e1c8 100644 --- a/src/H5Bcache.c +++ b/src/H5Bcache.c @@ -181,11 +181,11 @@ H5B__cache_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ /* Sibling pointers */ if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - H5_addr_decode(udata->f, (const uint8_t **)&image, &(bt->left)); + H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->left)); if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - H5_addr_decode(udata->f, (const uint8_t **)&image, &(bt->right)); + H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->right)); /* Child/key pairs */ native = bt->native; @@ -201,7 +201,7 @@ H5B__cache_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ /* Decode address value */ if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - H5_addr_decode(udata->f, (const uint8_t **)&image, bt->child + u); + H5F_addr_decode(udata->f, (const uint8_t **)&image, bt->child + u); } /* Final key */ @@ -217,7 +217,7 @@ H5B__cache_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ done: if (!ret_value && bt) if (H5B__node_dest(bt) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B__cache_deserialize() */ @@ -299,8 +299,8 @@ H5B__cache_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len, vo UINT16ENCODE(image, bt->nchildren); /* sibling pointers */ - H5_addr_encode(f, &image, bt->left); - H5_addr_encode(f, &image, bt->right); + H5F_addr_encode(f, &image, bt->left); + H5F_addr_encode(f, &image, bt->right); /* child keys and pointers */ native = 
bt->native; @@ -312,7 +312,7 @@ H5B__cache_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len, vo native += shared->type->sizeof_nkey; /* encode the child address */ - H5_addr_encode(f, &image, bt->child[u]); + H5F_addr_encode(f, &image, bt->child[u]); } /* end for */ if (bt->nchildren > 0) { /* Encode the final key */ diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c index d54e041..93d44d0 100644 --- a/src/H5Bdbg.c +++ b/src/H5Bdbg.c @@ -124,7 +124,7 @@ H5B_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, const H5 done: if (bt && H5AC_unprotect(f, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_debug() */ diff --git a/src/H5C.c b/src/H5C.c index 00c4563..8cff6fe 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -59,74 +59,26 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Cpkg.h" /* Cache */ -#include "H5CXprivate.h" /* API Contexts */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* Files */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ #include "H5MFprivate.h" /* File memory management */ #include "H5MMprivate.h" /* Memory management */ -#include "H5Pprivate.h" /* Property lists */ /****************/ /* Local Macros */ /****************/ -#if H5C_DO_MEMORY_SANITY_CHECKS -#define H5C_IMAGE_EXTRA_SPACE 8 -#define H5C_IMAGE_SANITY_VALUE "DeadBeef" -#else /* H5C_DO_MEMORY_SANITY_CHECKS */ -#define H5C_IMAGE_EXTRA_SPACE 0 -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ /******************/ /* Local Typedefs */ /******************/ -/* Alias for pointer to cache entry, for use when allocating sequences of them */ -typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t; - /********************/ /* Local Prototypes */ /********************/ -static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); -static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); -static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, - size_t *new_max_cache_size_ptr, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr); -static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size); -static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags); -static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); -static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); -static void *H5C__load_entry(H5F_t *f, -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access, -#endif /* H5_HAVE_PARALLEL */ - const H5C_class_t *type, haddr_t addr, void *udata); - -static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t 
*entry); -static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry); -static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry); -static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry); - -static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring); -static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, - hbool_t actual); - -#ifndef NDEBUG -static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, - const H5C_cache_entry_t *base_entry); -#endif /* NDEBUG */ - /*********************/ /* Package Variables */ /*********************/ @@ -145,9 +97,6 @@ H5FL_DEFINE(H5C_tag_info_t); /* Declare a free list to manage the H5C_t struct */ H5FL_DEFINE_STATIC(H5C_t); -/* Declare a free list to manage arrays of cache entries */ -H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); - /*------------------------------------------------------------------------- * Function: H5C_create * @@ -204,8 +153,6 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, * the fields. */ - cache_ptr->magic = H5C__H5C_T_MAGIC; - cache_ptr->flush_in_progress = FALSE; if (NULL == (cache_ptr->log_info = (H5C_log_info_t *)H5MM_calloc(sizeof(H5C_log_info_t)))) @@ -359,9 +306,8 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, /* Set non-zero/FALSE/NULL fields for epoch markers */ for (i = 0; i < H5C__MAX_EPOCH_MARKERS; i++) { - ((cache_ptr->epoch_markers)[i]).magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; - ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER; + ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; + ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER; } /* Initialize cache image generation on file close related fields. @@ -405,7 +351,7 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, #ifndef NDEBUG cache_ptr->get_entry_ptr_from_addr_counter = 0; -#endif /* NDEBUG */ +#endif /* Set return value */ ret_value = cache_ptr; @@ -422,8 +368,7 @@ done: if (cache_ptr->log_info != NULL) H5MM_xfree(cache_ptr->log_info); - cache_ptr->magic = 0; - cache_ptr = H5FL_FREE(H5C_t, cache_ptr); + cache_ptr = H5FL_FREE(H5C_t, cache_ptr); } } @@ -431,137 +376,6 @@ done: } /* H5C_create() */ /*------------------------------------------------------------------------- - * Function: H5C_def_auto_resize_rpt_fcn - * - * Purpose: Print results of a automatic cache resize. - * - * This function should only be used where printf() behaves - * well -- i.e. not on Windows. - * - * Return: void - * - * Programmer: John Mainzer - * 10/27/04 - * - *------------------------------------------------------------------------- - */ -void -H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, -#ifndef NDEBUG - int32_t version, -#else /* NDEBUG */ - int32_t H5_ATTR_UNUSED version, -#endif /* NDEBUG */ - double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size, - size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size) -{ - assert(cache_ptr != NULL); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER); - - switch (status) { - case in_spec: - fprintf(stdout, "%sAuto cache resize -- no change. 
(hit rate = %lf)\n", cache_ptr->prefix, - hit_rate); - break; - - case increase: - assert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); - assert(old_max_cache_size < new_max_cache_size); - - fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - fprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case flash_increase: - assert(old_max_cache_size < new_max_cache_size); - - fprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix, - (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold); - fprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case decrease: - assert(old_max_cache_size > new_max_cache_size); - - switch (cache_ptr->resize_ctl.decr_mode) { - case H5C_decr__off: - fprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix, - hit_rate); - break; - - case H5C_decr__threshold: - assert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); - - fprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); - fprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix, - cache_ptr->resize_ctl.upper_hr_threshold); - break; - - case H5C_decr__age_out: - fprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n", - cache_ptr->prefix, hit_rate); - break; - - case H5C_decr__age_out_with_threshold: - assert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); - - fprintf(stdout, - "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); - break; - - default: - fprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. 
HR = %lf\n", - cache_ptr->prefix, hit_rate); - } - - fprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case at_max_size: - fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - fprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix); - break; - - case at_min_size: - fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix, - hit_rate); - fprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix); - break; - - case increase_disabled: - fprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix, - hit_rate); - break; - - case decrease_disabled: - fprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix, - hit_rate); - break; - - case not_full: - assert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); - - fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - fprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix); - break; - - default: - fprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix); - break; - } -} /* H5C_def_auto_resize_rpt_fcn() */ - -/*------------------------------------------------------------------------- - * * Function: H5C_prep_for_file_close * * Purpose: This function should be called just prior to the cache @@ -587,11 +401,10 @@ H5C_prep_for_file_close(H5F_t *f) assert(f->shared->cache); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* It is possible to receive the close warning more than once */ if (cache_ptr->close_warning_received) - HGOTO_DONE(SUCCEED) + HGOTO_DONE(SUCCEED); cache_ptr->close_warning_received = TRUE; /* Make certain there aren't any protected entries */ @@ -672,11 +485,10 @@ H5C_dest(H5F_t *f) /* Sanity check */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); #if H5AC_DUMP_IMAGE_STATS_ON_CLOSE - if (H5C_image_stats(cache_ptr, TRUE) < 0) + if (H5C__image_stats(cache_ptr, TRUE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats") #endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */ @@ -713,16 +525,12 @@ H5C_dest(H5F_t *f) if (cache_ptr->log_info != NULL) H5MM_xfree(cache_ptr->log_info); -#ifndef NDEBUG #ifdef H5C_DO_SANITY_CHECKS if (cache_ptr->get_entry_ptr_from_addr_counter > 0) fprintf(stdout, "*** %" PRId64 " calls to H5C_get_entry_ptr_from_add(). ***\n", cache_ptr->get_entry_ptr_from_addr_counter); #endif /* H5C_DO_SANITY_CHECKS */ - cache_ptr->magic = 0; -#endif /* NDEBUG */ - cache_ptr = H5FL_FREE(H5C_t, cache_ptr); done: @@ -734,7 +542,7 @@ done: * and the cache still exist. 
JRM -- 5/15/20 */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed") + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed"); FUNC_LEAVE_NOAPI(ret_value) } /* H5C_dest() */ @@ -775,81 +583,6 @@ done: } /* H5C_evict() */ /*------------------------------------------------------------------------- - * Function: H5C_expunge_entry - * - * Purpose: Expunge an entry from the cache without writing it to disk - * even if it is dirty. The entry may not be either pinned or - * protected. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/29/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = NULL; - unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - assert(f); - assert(f->shared); - cache_ptr = f->shared->cache; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(type); - assert(H5_addr_defined(addr)); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Look for entry in cache */ - H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) - if ((entry_ptr == NULL) || (entry_ptr->type != type)) - /* the target doesn't exist in the cache, so we are done. */ - HGOTO_DONE(SUCCEED) - - assert(entry_ptr->addr == addr); - assert(entry_ptr->type == type); - - /* Check for entry being pinned or protected */ - if (entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected") - if (entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned") - - /* If we get this far, call H5C__flush_single_entry() with the - * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG. - * This will clear the entry, and then delete it from the cache. 
- */ - - /* Pass along 'free file space' flag */ - flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG); - - /* Delete the entry from the skip list on destroy */ - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - - if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry") - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_expunge_entry() */ - -/*------------------------------------------------------------------------- * Function: H5C_flush_cache * * Purpose: Flush (and possibly destroy) the entries contained in the @@ -888,7 +621,6 @@ H5C_flush_cache(H5F_t *f, unsigned flags) assert(f->shared); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->slist_ptr); #ifdef H5C_DO_SANITY_CHECKS @@ -1015,11 +747,8 @@ H5C_flush_to_min_clean(H5F_t *f) assert(f); assert(f->shared); - cache_ptr = f->shared->cache; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (cache_ptr->check_write_permitted != NULL) { if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) @@ -1039,7216 +768,672 @@ done: } /* H5C_flush_to_min_clean() */ /*------------------------------------------------------------------------- - * Function: H5C_insert_entry - * - * Purpose: Adds the specified thing to the cache. The thing need not - * exist on disk yet, but it must have an address and disk - * space reserved. + * Function: H5C_reset_cache_hit_rate_stats() * - * Return: Non-negative on success/Negative on failure + * Purpose: Reset the cache hit rate computation fields. * - * Programmer: John Mainzer - * 6/2/04 + * Return: SUCCEED on success, and FAIL on failure. * *------------------------------------------------------------------------- */ herr_t -H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags) +H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) { - H5C_t *cache_ptr; - H5AC_ring_t ring = H5C_RING_UNDEFINED; - hbool_t insert_pinned; - hbool_t flush_last; -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ -#endif /* H5_HAVE_PARALLEL */ - hbool_t set_flush_marker; - hbool_t write_permitted = TRUE; - size_t empty_space; - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *test_entry_ptr; - hbool_t entry_tagged = FALSE; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - assert(f); - assert(f->shared); - - cache_ptr = f->shared->cache; - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(type); - assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - assert(type->image_len); - assert(H5_addr_defined(addr)); - assert(thing); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - /* no need to verify that entry is not already in the index as */ - /* we already make that check below. 
*/ - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); - insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0); - flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); - - /* Get the ring type from the API context */ - ring = H5CX_get_ring(); - - entry_ptr = (H5C_cache_entry_t *)thing; - - /* verify that the new entry isn't already in the hash table -- scream - * and die if it is. - */ - - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr != NULL) { - if (test_entry_ptr == entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache") - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache") - } /* end if */ - - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - entry_ptr->cache_ptr = cache_ptr; - entry_ptr->addr = addr; - entry_ptr->type = type; - - entry_ptr->image_ptr = NULL; - entry_ptr->image_up_to_date = FALSE; - - entry_ptr->is_protected = FALSE; - entry_ptr->is_read_only = FALSE; - entry_ptr->ro_ref_count = 0; - - entry_ptr->is_pinned = insert_pinned; - entry_ptr->pinned_from_client = insert_pinned; - entry_ptr->pinned_from_cache = FALSE; - entry_ptr->flush_me_last = flush_last; - - /* newly inserted entries are assumed to be dirty */ - entry_ptr->is_dirty = TRUE; - - /* not protected, so can't be dirtied */ - entry_ptr->dirtied = FALSE; - - /* Retrieve the size of the thing */ - if ((type->image_len)(thing, &(entry_ptr->size)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing") - assert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE); - - entry_ptr->in_slist = FALSE; - -#ifdef H5_HAVE_PARALLEL - entry_ptr->clear_on_unprotect = FALSE; - entry_ptr->flush_immediately = FALSE; -#endif /* H5_HAVE_PARALLEL */ - - entry_ptr->flush_in_progress = FALSE; - entry_ptr->destroy_in_progress = FALSE; - - entry_ptr->ring = ring; - - /* Initialize flush dependency fields */ - entry_ptr->flush_dep_parent = NULL; - entry_ptr->flush_dep_nparents = 0; - entry_ptr->flush_dep_parent_nalloc = 0; - entry_ptr->flush_dep_nchildren = 0; - entry_ptr->flush_dep_ndirty_children = 0; - entry_ptr->flush_dep_nunser_children = 0; - - entry_ptr->ht_next = NULL; - entry_ptr->ht_prev = NULL; - entry_ptr->il_next = NULL; - entry_ptr->il_prev = NULL; - - entry_ptr->next = NULL; - entry_ptr->prev = NULL; - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - entry_ptr->aux_next = NULL; - entry_ptr->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#ifdef H5_HAVE_PARALLEL - entry_ptr->coll_next = NULL; - entry_ptr->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* initialize cache image related fields */ - entry_ptr->include_in_image = FALSE; - entry_ptr->lru_rank = 0; - entry_ptr->image_dirty = FALSE; - entry_ptr->fd_parent_count = 0; - entry_ptr->fd_parent_addrs = NULL; - entry_ptr->fd_child_count = 0; - entry_ptr->fd_dirty_child_count = 0; - entry_ptr->image_fd_height = 0; - entry_ptr->prefetched = FALSE; - entry_ptr->prefetch_type_id = 0; - entry_ptr->age = 0; - entry_ptr->prefetched_dirty = FALSE; -#ifndef NDEBUG /* debugging field */ - entry_ptr->serialization_count = 0; -#endif /* NDEBUG */ - - /* initialize tag list fields */ - entry_ptr->tl_next = NULL; - entry_ptr->tl_prev = NULL; - 
entry_ptr->tag_info = NULL; - - /* Apply tag to newly inserted entry */ - if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") - entry_tagged = TRUE; - - H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) - - if (cache_ptr->flash_size_increase_possible && - (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) - if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed") - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - if (cache_ptr->evictions_enabled && - (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) || - (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) { - size_t space_needed; - - if (empty_space <= entry_ptr->size) - cache_ptr->cache_full = TRUE; - - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted") - } /* end if */ - else - write_permitted = cache_ptr->write_permitted; - - assert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE); - space_needed = entry_ptr->size; - if (space_needed > cache_ptr->max_cache_size) - space_needed = cache_ptr->max_cache_size; - - /* Note that space_needed is just the amount of space that - * needed to insert the new entry without exceeding the cache - * size limit. The subsequent call to H5C__make_space_in_cache() - * may evict the entries required to free more or less space - * depending on conditions. It MAY be less if the cache is - * currently undersized, or more if the cache is oversized. - * - * The cache can exceed its maximum size limit via the following - * mechanisms: - * - * First, it is possible for the cache to grow without - * bound as long as entries are protected and not unprotected. - * - * Second, when writes are not permitted it is also possible - * for the cache to grow without bound. - * - * Finally, we usually don't check to see if the cache is - * oversized at the end of an unprotect. As a result, it is - * possible to have a vastly oversized cache with no protected - * entries as long as all the protects precede the unprotects. - */ - - if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed") - } /* end if */ - - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) - - /* New entries are presumed to be dirty */ - assert(entry_ptr->is_dirty); - entry_ptr->flush_marker = set_flush_marker; - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL) - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* If the entry's type has a 'notify' callback send a 'after insertion' - * notice now that the entry is fully integrated into the cache. 
- */ - if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache") - - H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) + if (cache_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) - coll_access = H5F_get_coll_metadata_reads(f); - - entry_ptr->coll_access = coll_access; - if (coll_access) { - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL) - - /* Make sure the size of the collective entries in the cache remain in check */ - if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { - if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) { - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries") - } /* end if */ - } /* end if */ - else { - if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) { - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries") - } /* end if */ - } /* end else */ - } /* end if */ -#endif + cache_ptr->cache_hits = 0; + cache_ptr->cache_accesses = 0; done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - if (ret_value < 0 && entry_tagged) - if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_insert_entry() */ +} /* H5C_reset_cache_hit_rate_stats() */ /*------------------------------------------------------------------------- - * Function: H5C_mark_entry_dirty - * - * Purpose: Mark a pinned or protected entry as dirty. The target entry - * MUST be either pinned or protected, and MAY be both. - * - * In the protected case, this call is the functional - * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect - * call. + * Function: H5C_set_cache_auto_resize_config * - * In the pinned but not protected case, if the entry is not - * already dirty, the function places function marks the entry - * dirty and places it on the skip list. + * Purpose: Set the cache automatic resize configuration to the + * provided values if they are in range, and fail if they + * are not. * - * Return: Non-negative on success/Negative on failure + * If the new configuration enables automatic cache resizing, + * coerce the cache max size and min clean size into agreement + * with the new policy and re-set the full cache hit rate + * stats. * - * Programmer: John Mainzer - * 5/15/06 + * Return: SUCCEED on success, and FAIL on failure. 
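H5C_reset_cache_hit_rate_stats() above zeroes the two counters from which the cache derives its hit rate, and the resize logic in H5C_set_cache_auto_resize_config() below compares that rate against the configured thresholds. A minimal sketch of the derived quantity, as a hypothetical helper built on those counters:

    #include <stdint.h>

    /* hit rate = hits / accesses for the current epoch; 0.0 when idle */
    static double demo_hit_rate(uint64_t cache_hits, uint64_t cache_accesses)
    {
        if (cache_accesses == 0)
            return 0.0;
        return (double)cache_hits / (double)cache_accesses;
    }
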
* *------------------------------------------------------------------------- */ herr_t -H5C_mark_entry_dirty(void *thing) +H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr) { - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ + size_t new_max_cache_size; + size_t new_min_clean_size; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - /* Sanity checks */ - assert(entry_ptr); - assert(H5_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (entry_ptr->is_protected) { - assert(!((entry_ptr)->is_read_only)); + if (cache_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") + if (config_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") + if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version") - /* set the dirtied flag */ - entry_ptr->dirtied = TRUE; + /* check general configuration section of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config") - /* reset image_up_to_date */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; + /* check size increase control fields of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config") - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - } /* end if */ - else if (entry_ptr->is_pinned) { - hbool_t was_clean; /* Whether the entry was previously clean */ - hbool_t image_was_up_to_date; + /* check size decrease control fields of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config") - /* Remember previous dirty status */ - was_clean = !entry_ptr->is_dirty; + /* check for conflicts between size increase and size decrease controls: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config") - /* Check if image is up to date */ - image_was_up_to_date = entry_ptr->image_up_to_date; + /* will set the increase possible fields to FALSE later if needed */ + cache_ptr->size_increase_possible = TRUE; + cache_ptr->flash_size_increase_possible = TRUE; + cache_ptr->size_decrease_possible = TRUE; - /* Mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - entry_ptr->image_up_to_date = FALSE; + switch (config_ptr->incr_mode) { + case H5C_incr__off: + cache_ptr->size_increase_possible = FALSE; + break; - /* Modify cache data structures */ - if (was_clean) - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) - if (!entry_ptr->in_slist) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + case H5C_incr__threshold: + if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) || + ((config_ptr->apply_max_increment) && 
(config_ptr->max_increment <= 0))) + cache_ptr->size_increase_possible = FALSE; + break; - /* Update stats for entry being marked dirty */ - H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?") + } /* end switch */ - /* Check for entry changing status and do notifications, etc. */ - if (was_clean) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - if (image_was_up_to_date) - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??") + /* logically, this is where configuration for flash cache size increases + * should go. However, this configuration depends on max_cache_size, so + * we wait until the end of the function, when this field is set. + */ -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_dirty() */ + switch (config_ptr->decr_mode) { + case H5C_decr__off: + cache_ptr->size_decrease_possible = FALSE; + break; -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_clean - * - * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned. - * - * If the entry is not - * already clean, the function places function marks the entry - * clean and removes it from the skip list. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 7/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_clean(void *_thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(entry_ptr); - assert(H5_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* Operate on pinned entry */ - if (entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected") - else if (entry_ptr->is_pinned) { - hbool_t was_dirty; /* Whether the entry was previously dirty */ - - /* Remember previous dirty status */ - was_dirty = entry_ptr->is_dirty; - - /* Mark the entry as clean if it isn't already */ - entry_ptr->is_dirty = FALSE; - - /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */ - entry_ptr->flush_marker = FALSE; - - /* Modify cache data structures */ - if (was_dirty) - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL) - if (entry_ptr->in_slist) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) - - /* Update stats for entry being marked clean */ - H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) - - /* Check for entry changing status and do notifications, etc. */ - if (was_dirty) { - /* If the entry's type has a 'notify' callback send a 'entry cleaned' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the clean up the flush dependency chain, if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_clean() */ - -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_unserialized - * - * Purpose: Mark a pinned or protected entry as unserialized. The target - * entry MUST be either pinned or protected, and MAY be both. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 12/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_unserialized(void *thing) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(entry); - assert(H5_addr_defined(entry->addr)); - - if (entry->is_protected || entry->is_pinned) { - assert(!entry->is_read_only); - - /* Reset image_up_to_date */ - if (entry->image_up_to_date) { - entry->image_up_to_date = FALSE; - - if (entry->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, - "Entry to unserialize is neither pinned nor protected??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_unserialized() */ - -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_serialized - * - * Purpose: Mark a pinned entry as serialized. The target entry MUST be - * pinned. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 12/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_serialized(void *_thing) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(entry); - assert(H5_addr_defined(entry->addr)); - - /* Operate on pinned entry */ - if (entry->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected") - else if (entry->is_pinned) { - /* Check for entry changing status and do notifications, etc. */ - if (!entry->image_up_to_date) { - /* Set the image_up_to_date flag */ - entry->image_up_to_date = TRUE; - - /* Propagate the serialize up the flush dependency chain, if appropriate */ - if (entry->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_serialized(entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, - "Can't propagate flush dep serialize") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_serialized() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_move_entry - * - * Purpose: Use this function to notify the cache that an entry's - * file address changed. 
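H5C_move_entry() takes the entry's class plus its old and new file addresses, and (per the diffstat) migrates out of H5C.c along with the other entry-level routines. A hedged usage sketch with hypothetical caller context -- the class constant and error style are taken from elsewhere in this patch:

    /* After relocating a v1 B-tree node on disk, notify the cache of the
     * address change (hypothetical caller; 'f' is the open H5F_t) */
    if (H5C_move_entry(f->shared->cache, H5AC_BT, old_addr, new_addr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "unable to move entry");
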
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/2/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr) -{ - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *test_entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(type); - assert(H5_addr_defined(old_addr)); - assert(H5_addr_defined(new_addr)); - assert(H5_addr_ne(old_addr, new_addr)); + case H5C_decr__threshold: + if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) + cache_ptr->size_decrease_possible = FALSE; + break; -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + case H5C_decr__age_out: + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) + cache_ptr->size_decrease_possible = FALSE; + break; - H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL) + case H5C_decr__age_out_with_threshold: + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || + config_ptr->upper_hr_threshold >= 1.0) + cache_ptr->size_decrease_possible = FALSE; + break; - if (entry_ptr == NULL || entry_ptr->type != type) - /* the old item doesn't exist in the cache, so we are done. */ - HGOTO_DONE(SUCCEED) + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?") + } /* end switch */ - assert(entry_ptr->addr == old_addr); - assert(entry_ptr->type == type); + if (config_ptr->max_size == config_ptr->min_size) { + cache_ptr->size_increase_possible = FALSE; + cache_ptr->flash_size_increase_possible = FALSE; + cache_ptr->size_decrease_possible = FALSE; + } /* end if */ - /* Check for R/W status, otherwise error */ - /* (Moving a R/O entry would mark it dirty, which shouldn't - * happen. QAK - 2016/12/02) + /* flash_size_increase_possible is intentionally omitted from the + * following: */ - if (entry_ptr->is_read_only) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry") + cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; + cache_ptr->resize_ctl = *config_ptr; - H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL) + /* Resize the cache to the supplied initial value if requested, or as + * necessary to force it within the bounds of the current automatic + * cache resizing configuration. + * + * Note that the min_clean_fraction may have changed, so we + * go through the exercise even if the current size is within + * range and an initial size has not been provided. 
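As a concrete reading of the coercion below: the new maximum is the requested initial size if one was supplied, otherwise the current size clamped into [min_size, max_size], and the minimum clean size is then re-derived from min_clean_fraction. With hypothetical values min_size = 1 MiB, max_size = 16 MiB, and min_clean_fraction = 0.5, a cache currently at 32 MiB comes out as:

    size_t new_max_cache_size = 16 * 1024 * 1024; /* 32 MiB clamped to max_size */
    size_t new_min_clean_size =
        (size_t)((double)new_max_cache_size * 0.5); /* 8 MiB */
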
+ */ + if (cache_ptr->resize_ctl.set_initial_size) + new_max_cache_size = cache_ptr->resize_ctl.initial_size; + else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size) + new_max_cache_size = cache_ptr->resize_ctl.min_size; + else + new_max_cache_size = cache_ptr->max_cache_size; - if (test_entry_ptr != NULL) { /* we are hosed */ - if (test_entry_ptr->type == type) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???") - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?") - } /* end if */ + new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); - /* If we get this far we have work to do. Remove *entry_ptr from - * the hash table (and skip list if necessary), change its address to the - * new address, mark it as dirty (if it isn't already) and then re-insert. - * - * Update the replacement policy for a hit to avoid an eviction before - * the moved entry is touched. Update stats for a move. + /* since new_min_clean_size is of type size_t, we have * - * Note that we do not check the size of the cache, or evict anything. - * Since this is a simple re-name, cache size should be unaffected. + * ( 0 <= new_min_clean_size ) * - * Check to see if the target entry is in the process of being destroyed - * before we delete from the index, etc. If it is, all we do is - * change the addr. If the entry is only in the process of being flushed, - * don't mark it as dirty either, lest we confuse the flush call back. + * by definition. */ - if (!entry_ptr->destroy_in_progress) { - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->in_slist) { - assert(cache_ptr->slist_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) - } /* end if */ - } /* end if */ - - entry_ptr->addr = new_addr; - - if (!entry_ptr->destroy_in_progress) { - hbool_t was_dirty; /* Whether the entry was previously dirty */ - - /* Remember previous dirty status */ - was_dirty = entry_ptr->is_dirty; - - /* Mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - - /* This shouldn't be needed, but it keeps the test code happy */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Modify cache data structures */ - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - - /* Skip some actions if we're in the middle of flushing the entry */ - if (!entry_ptr->flush_in_progress) { - /* Update the replacement policy for the entry */ - H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL) - - /* Check for entry changing status and do notifications, etc. */ - if (!was_dirty) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. 
- */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, - "Can't propagate flush dep dirty flag") - } /* end if */ - } /* end if */ - } /* end if */ - - H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_move_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_resize_entry - * - * Purpose: Resize a pinned or protected entry. - * - * Resizing an entry dirties it, so if the entry is not - * already dirty, the function places the entry on the - * skip list. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 7/5/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_resize_entry(void *thing, size_t new_size) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(entry_ptr); - assert(H5_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* Check for usage errors */ - if (new_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive") - if (!(entry_ptr->is_pinned || entry_ptr->is_protected)) - HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??") - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* update for change in entry size if necessary */ - if (entry_ptr->size != new_size) { - hbool_t was_clean; - - /* make note of whether the entry was clean to begin with */ - was_clean = !entry_ptr->is_dirty; - - /* mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - - /* Reset the image up-to-date status */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Release the current image */ - if (entry_ptr->image_ptr) - entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); - - /* do a flash cache size increase if appropriate */ - if (cache_ptr->flash_size_increase_possible) { - if (new_size > entry_ptr->size) { - size_t size_increase; - - size_increase = new_size - entry_ptr->size; - if (size_increase >= cache_ptr->flash_size_increase_threshold) - if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, 
FAIL, "flash cache increase failed") - } - } - - /* update the pinned and/or protected entry list */ - if (entry_ptr->is_pinned) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size, - new_size, FAIL) - if (entry_ptr->is_protected) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size, - FAIL) - -#ifdef H5_HAVE_PARALLEL - if (entry_ptr->coll_access) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size, - entry_ptr->size, new_size, FAIL) -#endif /* H5_HAVE_PARALLEL */ - - /* update statistics just before changing the entry size */ - H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); - - /* update the hash table */ - H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL); - - /* if the entry is in the skip list, update that too */ - if (entry_ptr->in_slist) - H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size); - - /* finally, update the entry size proper */ - entry_ptr->size = new_size; - - if (!entry_ptr->in_slist) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->is_pinned) - H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) + assert(new_min_clean_size <= new_max_cache_size); + assert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); + assert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - /* Check for entry changing status and do notifications, etc. */ - if (was_clean) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - } /* end if */ + if (new_max_cache_size < cache_ptr->max_cache_size) + cache_ptr->size_decreased = TRUE; -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_resize_entry() */ + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") -/*------------------------------------------------------------------------- - * Function: H5C_pin_protected_entry() - * - * Purpose: Pin a protected cache entry. The entry must be protected - * at the time of call, and must be unpinned. 
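/* A small sketch of the flash-size-increase trigger used by
 * H5C_resize_entry above: only growth of at least
 * flash_size_increase_threshold bytes triggers a flash cache size
 * increase; shrinking never does.  Names here are illustrative.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
sketch_resize_triggers_flash_increase(size_t old_size, size_t new_size,
                                      size_t flash_threshold)
{
    if (new_size <= old_size)
        return false; /* no growth, so no flash increase */
    return (new_size - old_size) >= flash_threshold;
}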
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 4/26/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_pin_protected_entry(void *thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */ - herr_t ret_value = SUCCEED; /* Return value */ + /* remove excess epoch markers if any */ + if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) || + (config_ptr->decr_mode == H5C_decr__age_out)) { + if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) + if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") + } /* end if */ + else if (cache_ptr->epoch_markers_active > 0) { + if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") + } - FUNC_ENTER_NOAPI(FAIL) + /* configure flash size increase facility. We wait until the + * end of the function, as we need the max_cache_size set before + * we start to keep things simple. + * + * If we haven't already ruled out flash cache size increases above, + * go ahead and configure it. + */ + if (cache_ptr->flash_size_increase_possible) { + switch (config_ptr->flash_incr_mode) { + case H5C_flash_incr__off: + cache_ptr->flash_size_increase_possible = FALSE; + break; - /* Sanity checks */ - assert(entry_ptr); - assert(H5_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_possible = TRUE; + cache_ptr->flash_size_increase_threshold = + (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); + break; -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Only protected entries can be pinned */ - if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected") - - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_pin_protected_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_protect - * - * Purpose: If the target entry is not in the cache, load it. If - * necessary, attempt to evict one or more entries to keep - * the cache within its maximum size. - * - * Mark the target entry as protected, and return its address - * to the caller. The caller must call H5C_unprotect() when - * finished with the entry. - * - * While it is protected, the entry may not be either evicted - * or flushed -- nor may it be accessed by another call to - * H5C_protect. 
Any attempt to do so will result in a failure. - * - * Return: Success: Ptr to the desired entry - * Failure: NULL - * - * Programmer: John Mainzer - 6/2/04 - * - *------------------------------------------------------------------------- - */ -void * -H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags) -{ - H5C_t *cache_ptr; - H5AC_ring_t ring = H5C_RING_UNDEFINED; - hbool_t hit; - hbool_t have_write_permitted = FALSE; - hbool_t read_only = FALSE; - hbool_t flush_last; -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ -#endif /* H5_HAVE_PARALLEL */ - hbool_t write_permitted = FALSE; - hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */ - size_t empty_space; - void *thing; - H5C_cache_entry_t *entry_ptr; - void *ret_value = NULL; /* Return value */ - - FUNC_ENTER_NOAPI(NULL) - - /* check args */ - assert(f); - assert(f->shared); - - cache_ptr = f->shared->cache; - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(type); - assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - assert(H5_addr_defined(addr)); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Load the cache image, if requested */ - if (cache_ptr->load_image) { - cache_ptr->load_image = FALSE; - if (H5C__load_cache_image(f) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image") - } /* end if */ - - read_only = ((flags & H5C__READ_ONLY_FLAG) != 0); - flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); - - /* Get the ring type from the API context */ - ring = H5CX_get_ring(); - -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) - coll_access = H5F_get_coll_metadata_reads(f); -#endif /* H5_HAVE_PARALLEL */ - - /* first check to see if the target is in cache */ - H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL) - - if (entry_ptr != NULL) { - if (entry_ptr->ring != ring) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") - - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - if (entry_ptr->prefetched) { - /* This call removes the prefetched entry from the cache, - * and replaces it with an entry deserialized from the - * image of the prefetched entry. - */ - if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry") - - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(!entry_ptr->prefetched); - assert(entry_ptr->addr == addr); - } /* end if */ - - /* Check for trying to load the wrong type of entry from an address */ - if (entry_ptr->type != type) - HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") - -#ifdef H5_HAVE_PARALLEL - /* If this is a collective metadata read, the entry is not marked as - * collective, and is clean, it is possible that other processes will - * not have it in its cache and will expect a bcast of the entry from - * process 0. So process 0 will bcast the entry to all other ranks. - * Ranks that _do_ have the entry in their cache still have to - * participate in the bcast. 
- */ - if (coll_access) { - if (!entry_ptr->is_dirty && !entry_ptr->coll_access) { - MPI_Comm comm; /* File MPI Communicator */ - int mpi_code; /* MPI error code */ - int buf_size; - - if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f))) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") - - if (entry_ptr->image_ptr == NULL) { - int mpi_rank; - - if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") - - if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, - "memory allocation failed for on disk image buffer") -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0) - /* If image generation fails, push an error but - * still participate in the following MPI_Bcast - */ - HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image") - } /* end if */ - assert(entry_ptr->image_ptr); - - H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t); - if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - - /* Mark the entry as collective and insert into the collective list */ - entry_ptr->coll_access = TRUE; - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) - } /* end if */ - else if (entry_ptr->coll_access) - H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - -#ifdef H5C_DO_TAGGING_SANITY_CHECKS - { - /* Verify tag value */ - if (cache_ptr->ignore_tags != TRUE) { - haddr_t tag; /* Tag value */ - - /* The entry is already in the cache, but make sure that the tag value - * is still legal. This will ensure that had the entry NOT been in the - * cache, tagging was still set up correctly and it would have received - * a legal tag value after getting loaded from disk. - */ - - /* Get the tag */ - tag = H5CX_get_tag(); - - if (H5C_verify_tag(entry_ptr->type->id, tag) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed") - } /* end if */ - } -#endif - - hit = TRUE; - thing = (void *)entry_ptr; - } - else { - /* must try to load the entry from disk. */ - hit = FALSE; - if (NULL == (thing = H5C__load_entry(f, -#ifdef H5_HAVE_PARALLEL - coll_access, -#endif /* H5_HAVE_PARALLEL */ - type, addr, udata))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry") - - entry_ptr = (H5C_cache_entry_t *)thing; - cache_ptr->entries_loaded_counter++; - - entry_ptr->ring = ring; -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access) - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) -#endif /* H5_HAVE_PARALLEL */ - - /* Apply tag to newly protected entry */ - if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry") - - /* If the entry is very large, and we are configured to allow it, - * we may wish to perform a flash cache size increase. 
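/* A generic MPI sketch of the collective-read pattern above: rank 0
 * holds the serialized entry image and broadcasts it so every rank
 * ends up with an identical copy.  This is plain MPI, not the
 * H5C-internal code; buffer allocation and error recovery are
 * simplified away.
 */
#include <mpi.h>

static int
sketch_bcast_entry_image(void *image, int image_size, MPI_Comm comm)
{
    /* Every rank, including rank 0, must make the same call; rank 0
     * supplies the data and the other ranks have theirs overwritten.
     */
    return MPI_Bcast(image, image_size, MPI_BYTE, 0, comm);
}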
- */
-        if (cache_ptr->flash_size_increase_possible &&
-            (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
-            if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
-
-        if (cache_ptr->index_size >= cache_ptr->max_cache_size)
-            empty_space = 0;
-        else
-            empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
-        /* Try to free up space if necessary and if evictions are permitted.  Note
-         * that if evictions are enabled, we will call H5C__make_space_in_cache()
-         * both when the new entry would not fit and when the min_clean_size
-         * requirement is not met.
-         */
-        if (cache_ptr->evictions_enabled &&
-            (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
-             ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
-
-            size_t space_needed;
-
-            if (empty_space <= entry_ptr->size)
-                cache_ptr->cache_full = TRUE;
-
-            if (cache_ptr->check_write_permitted != NULL) {
-                if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
-                    HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
-                else
-                    have_write_permitted = TRUE;
-            } /* end if */
-            else {
-                write_permitted      = cache_ptr->write_permitted;
-                have_write_permitted = TRUE;
-            } /* end else */
-
-            assert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
-            space_needed = entry_ptr->size;
-            if (space_needed > cache_ptr->max_cache_size)
-                space_needed = cache_ptr->max_cache_size;
-
-            /* Note that space_needed is just the amount of space needed
-             * to insert the new entry without exceeding the cache size
-             * limit.  The subsequent call to H5C__make_space_in_cache()
-             * may evict entries that free up more or less space than
-             * this, depending on conditions.  It MAY free less if the
-             * cache is currently undersized, or more if the cache is
-             * oversized.
-             *
-             * The cache can exceed its maximum size limit via the following
-             * mechanisms:
-             *
-             * First, it is possible for the cache to grow without
-             * bound as long as entries are protected and not unprotected.
-             *
-             * Second, when writes are not permitted, it is also possible
-             * for the cache to grow without bound.
-             *
-             * Third, the user may choose to disable evictions -- causing
-             * the cache to grow without bound until evictions are
-             * re-enabled.
-             *
-             * Finally, we usually don't check to see if the cache is
-             * oversized at the end of an unprotect.  As a result, it is
-             * possible to have a vastly oversized cache with no protected
-             * entries as long as all the protects precede the unprotects.
-             */
-            if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
-        } /* end if */
-
-        /* Insert the entry in the hash table.
-         *
-         * *******************************************
-         *
-         * Set the flush_me_last field of the newly loaded entry before
-         * inserting it into the index.  Must do this, as the index tracks
-         * the number of entries with the flush_me_last field set, but
-         * assumes that the field will not change after insertion into
-         * the index.
-         *
-         * Note that this means that the H5C__FLUSH_LAST_FLAG flag
-         * is ignored if the entry is already in cache.
-         */
-        entry_ptr->flush_me_last = flush_last;
-
-        H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
-        if (entry_ptr->is_dirty && !entry_ptr->in_slist)
-            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
-
-        /* insert the entry in the data structures used by the replacement
-         * policy. 
We are just going to take it out again when we update - * the replacement policy for a protect, but this simplifies the - * code. If we do this often enough, we may want to optimize this. - */ - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL) - - /* Record that the entry was loaded, to trigger a notify callback later */ - /* (After the entry is fully added to the cache) */ - was_loaded = TRUE; - } /* end else */ - - assert(entry_ptr->addr == addr); - assert(entry_ptr->type == type); - - if (entry_ptr->is_protected) { - if (read_only && entry_ptr->is_read_only) { - assert(entry_ptr->ro_ref_count > 0); - (entry_ptr->ro_ref_count)++; - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?") - } /* end if */ - else { - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) - - entry_ptr->is_protected = TRUE; - if (read_only) { - entry_ptr->is_read_only = TRUE; - entry_ptr->ro_ref_count = 1; - } /* end if */ - entry_ptr->dirtied = FALSE; - } /* end else */ - - H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) - H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) - - ret_value = thing; - - if (cache_ptr->evictions_enabled && - (cache_ptr->size_decreased || - (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) { - - if (!have_write_permitted) { - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted") - else - have_write_permitted = TRUE; - } - else { - write_permitted = cache_ptr->write_permitted; - have_write_permitted = TRUE; - } - } - - if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)) - if (H5C__auto_adjust_cache_size(f, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed") - - if (cache_ptr->size_decreased) { - cache_ptr->size_decreased = FALSE; - - /* check to see if the cache is now oversized due to the cache - * size reduction. If it is, try to evict enough entries to - * bring the cache size down to the current maximum cache size. - * - * Also, if the min_clean_size requirement is not met, we - * should also call H5C__make_space_in_cache() to bring us - * into compliance. - */ - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - if ((cache_ptr->index_size > cache_ptr->max_cache_size) || - ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) { - - if (cache_ptr->index_size > cache_ptr->max_cache_size) - cache_ptr->cache_full = TRUE; - - if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") - } - } /* end if */ - } - - /* If we loaded the entry and the entry's type has a 'notify' callback, send - * an 'after load' notice now that the entry is fully integrated into - * the cache and protected. We must wait until it is protected so it is not - * evicted during the notify callback. - */ - if (was_loaded) - /* If the entry's type has a 'notify' callback send a 'after load' - * notice now that the entry is fully integrated into the cache. 
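/* A compact sketch of the "do we need to make space?" test used in
 * H5C_protect above: eviction is considered when the new entry would
 * push the index past max_cache_size, or when the clean space on hand
 * falls below min_clean_size.  All names are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
sketch_need_to_make_space(size_t index_size, size_t clean_index_size,
                          size_t entry_size, size_t max_cache_size,
                          size_t min_clean_size)
{
    /* empty_space saturates at zero when the cache is already full */
    size_t empty_space =
        (index_size >= max_cache_size) ? 0 : max_cache_size - index_size;

    return ((index_size + entry_size) > max_cache_size) ||
           ((empty_space + clean_index_size) < min_clean_size);
}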
- */ - if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, - "can't notify client about entry inserted into cache") - -#ifdef H5_HAVE_PARALLEL - /* Make sure the size of the collective entries in the cache remain in check */ - if (coll_access) { - if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { - if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") - } /* end if */ - else { - if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") - } /* end else */ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_protect() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_reset_cache_hit_rate_stats() - * - * Purpose: Reset the cache hit rate computation fields. - * - * Return: SUCCEED on success, and FAIL on failure. - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") - - cache_ptr->cache_hits = 0; - cache_ptr->cache_accesses = 0; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_reset_cache_hit_rate_stats() */ - -/*------------------------------------------------------------------------- - * Function: H5C_set_cache_auto_resize_config - * - * Purpose: Set the cache automatic resize configuration to the - * provided values if they are in range, and fail if they - * are not. - * - * If the new configuration enables automatic cache resizing, - * coerce the cache max size and min clean size into agreement - * with the new policy and re-set the full cache hit rate - * stats. - * - * Return: SUCCEED on success, and FAIL on failure. 
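/* A worked example of the collective-list limits above, using assumed
 * numbers.  The test
 *
 *     max_cache_size * 80 < coll_list_size * 100
 *
 * is integer-only shorthand for coll_list_size > 0.80 * max_cache_size
 * (the 40 variant gives the 40% limit).  With a 4 MiB cache
 * (max_cache_size = 4,194,304), the 80% limit first trips when
 * coll_list_size reaches 3,355,444 bytes, since
 * 3,355,444 * 100 > 4,194,304 * 80.
 */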
- * - *------------------------------------------------------------------------- - */ -herr_t -H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr) -{ - size_t new_max_cache_size; - size_t new_min_clean_size; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") - if (config_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") - if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version") - - /* check general configuration section of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config") - - /* check size increase control fields of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config") - - /* check size decrease control fields of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config") - - /* check for conflicts between size increase and size decrease controls: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config") - - /* will set the increase possible fields to FALSE later if needed */ - cache_ptr->size_increase_possible = TRUE; - cache_ptr->flash_size_increase_possible = TRUE; - cache_ptr->size_decrease_possible = TRUE; - - switch (config_ptr->incr_mode) { - case H5C_incr__off: - cache_ptr->size_increase_possible = FALSE; - break; - - case H5C_incr__threshold: - if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) || - ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0))) - cache_ptr->size_increase_possible = FALSE; - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?") - } /* end switch */ - - /* logically, this is where configuration for flash cache size increases - * should go. However, this configuration depends on max_cache_size, so - * we wait until the end of the function, when this field is set. 
- */ - - switch (config_ptr->decr_mode) { - case H5C_decr__off: - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__threshold: - if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__age_out: - if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__age_out_with_threshold: - if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || - config_ptr->upper_hr_threshold >= 1.0) - cache_ptr->size_decrease_possible = FALSE; - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?") - } /* end switch */ - - if (config_ptr->max_size == config_ptr->min_size) { - cache_ptr->size_increase_possible = FALSE; - cache_ptr->flash_size_increase_possible = FALSE; - cache_ptr->size_decrease_possible = FALSE; - } /* end if */ - - /* flash_size_increase_possible is intentionally omitted from the - * following: - */ - cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; - cache_ptr->resize_ctl = *config_ptr; - - /* Resize the cache to the supplied initial value if requested, or as - * necessary to force it within the bounds of the current automatic - * cache resizing configuration. - * - * Note that the min_clean_fraction may have changed, so we - * go through the exercise even if the current size is within - * range and an initial size has not been provided. - */ - if (cache_ptr->resize_ctl.set_initial_size) - new_max_cache_size = cache_ptr->resize_ctl.initial_size; - else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size) - new_max_cache_size = cache_ptr->resize_ctl.max_size; - else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size) - new_max_cache_size = cache_ptr->resize_ctl.min_size; - else - new_max_cache_size = cache_ptr->max_cache_size; - - new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); - - /* since new_min_clean_size is of type size_t, we have - * - * ( 0 <= new_min_clean_size ) - * - * by definition. - */ - assert(new_min_clean_size <= new_max_cache_size); - assert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); - assert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - - if (new_max_cache_size < cache_ptr->max_cache_size) - cache_ptr->size_decreased = TRUE; - - cache_ptr->max_cache_size = new_max_cache_size; - cache_ptr->min_clean_size = new_min_clean_size; - - if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) - /* this should be impossible... 
*/ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - - /* remove excess epoch markers if any */ - if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) || - (config_ptr->decr_mode == H5C_decr__age_out)) { - if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) - if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") - } /* end if */ - else if (cache_ptr->epoch_markers_active > 0) { - if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") - } - - /* configure flash size increase facility. We wait until the - * end of the function, as we need the max_cache_size set before - * we start to keep things simple. - * - * If we haven't already ruled out flash cache size increases above, - * go ahead and configure it. - */ - if (cache_ptr->flash_size_increase_possible) { - switch (config_ptr->flash_incr_mode) { - case H5C_flash_incr__off: - cache_ptr->flash_size_increase_possible = FALSE; - break; - - case H5C_flash_incr__add_space: - cache_ptr->flash_size_increase_possible = TRUE; - cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") - break; - } /* end switch */ - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_cache_auto_resize_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_set_evictions_enabled() - * - * Purpose: Set cache_ptr->evictions_enabled to the value of the - * evictions enabled parameter. - * - * Return: SUCCEED on success, and FAIL on failure. - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") - - /* There is no fundamental reason why we should not permit - * evictions to be disabled while automatic resize is enabled. - * However, allowing it would greatly complicate testing - * the feature. Hence the following: - */ - if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || - (cache_ptr->resize_ctl.decr_mode != H5C_decr__off))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled") - - cache_ptr->evictions_enabled = evictions_enabled; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_evictions_enabled() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_set_slist_enabled() - * - * Purpose: Enable or disable the slist as directed. - * - * The slist (skip list) is an address ordered list of - * dirty entries in the metadata cache. However, this - * list is only needed during flush and close, where we - * use it to write entries in more or less increasing - * address order. - * - * This function sets up and enables further operations - * on the slist, or disable the slist. This in turn - * allows us to avoid the overhead of maintaining the - * slist when it is not needed. 
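/* A worked example of the flash threshold computation above, with
 * assumed values: for max_cache_size = 4 MiB (4,194,304 bytes) and
 * flash_threshold = 0.4,
 *
 *     flash_size_increase_threshold = (size_t)(4194304.0 * 0.4)
 *                                   = 1677721   (the 0.6 is truncated)
 *
 * so a single entry larger than ~1.6 MiB would trigger a flash size
 * increase under H5C_flash_incr__add_space.
 */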
- *
- *
- *              If the slist_enabled parameter is TRUE, the function
- *
- *              1) Verifies that the slist is empty.
- *
- *              2) Scans the index list, and inserts all dirty entries
- *                 into the slist.
- *
- *              3) Sets cache_ptr->slist_enabled = TRUE.
- *
- *              Note that the clear_slist parameter is ignored if
- *              the slist_enabled parameter is TRUE.
- *
- *
- *              If the slist_enabled parameter is FALSE, the function
- *              shuts down the slist.
- *
- *              Normally the slist will be empty at this point, however
- *              that need not be the case if H5C_flush_cache() has been
- *              called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
- *
- *              Thus shutdown proceeds as follows:
- *
- *              1) Test to see if the slist is empty.  If it is, proceed
- *                 to step 3.
- *
- *              2) Test to see if the clear_slist parameter is TRUE.
- *
- *                 If it is, remove all entries from the slist.
- *
- *                 If it isn't, throw an error.
- *
- *              3) Set cache_ptr->slist_enabled = FALSE.
- *
- * Return:      SUCCEED on success, and FAIL on failure.
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist)
-{
-    H5C_cache_entry_t *entry_ptr;
-    herr_t             ret_value = SUCCEED; /* Return value */
-
-    FUNC_ENTER_NOAPI(FAIL)
-
-    if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
-        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")
-
-    if (slist_enabled) {
-        if (cache_ptr->slist_enabled)
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?")
-        if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?")
-
-        /* set cache_ptr->slist_enabled to TRUE so that the slist
-         * maintenance macros will be enabled.
-         */
-        cache_ptr->slist_enabled = TRUE;
-
-        /* scan the index list and insert all dirty entries in the slist */
-        entry_ptr = cache_ptr->il_head;
-        while (entry_ptr != NULL) {
-            assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-            if (entry_ptr->is_dirty)
-                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
-            entry_ptr = entry_ptr->il_next;
-        }
-
-        /* we don't maintain a dirty index len, so we can't do a cross
-         * check against it.  Note that there is no point in cross checking
-         * against the dirty LRU size, as the dirty LRU may not be maintained,
-         * and in any case, there is no requirement that all dirty entries
-         * will reside on the dirty LRU.
-         */
-        assert(cache_ptr->dirty_index_size == cache_ptr->slist_size);
-    }
-    else { /* take down the skip list */
-        if (!cache_ptr->slist_enabled)
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?")
-
-        if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {
-            if (clear_slist) {
-                H5SL_node_t *node_ptr;
-
-                node_ptr = H5SL_first(cache_ptr->slist_ptr);
-                while (node_ptr != NULL) {
-                    entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
-                    H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
-                    node_ptr = H5SL_first(cache_ptr->slist_ptr);
-                }
-            }
-            else
-                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?")
-        }
-
-        cache_ptr->slist_enabled = FALSE;
-
-        assert(0 == cache_ptr->slist_len);
-        assert(0 == cache_ptr->slist_size);
-    }
-
-done:
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_set_slist_enabled() */
-
-/*-------------------------------------------------------------------------
- * Function:    H5C_unpin_entry()
- *
- * Purpose:     Unpin a cache entry.  The entry can be either protected or
- *              unprotected at the time of call, but must be pinned. 
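/* A self-contained sketch of the enable path of H5C_set_slist_enabled
 * above: walk the index list and insert every dirty entry into the
 * skip list.  entry_t and sketch_slist_insert() are stand-ins for the
 * H5C-internal entry type and the H5C__INSERT_ENTRY_IN_SLIST() macro.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct entry_t {
    bool            is_dirty;
    struct entry_t *il_next; /* next entry on the index list */
} entry_t;

static size_t sketch_slist_len = 0;

static void
sketch_slist_insert(entry_t *entry)
{
    (void)entry;
    sketch_slist_len++; /* the real macro also tracks slist_size */
}

static void
sketch_populate_slist(entry_t *il_head)
{
    for (entry_t *e = il_head; e != NULL; e = e->il_next)
        if (e->is_dirty)
            sketch_slist_insert(e);
}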
- *
- * Return:      Non-negative on success/Negative on failure
- *
- * Programmer:  John Mainzer
- *              3/22/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_unpin_entry(void *_entry_ptr)
-{
-    H5C_t             *cache_ptr;
-    H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */
-    herr_t             ret_value = SUCCEED; /* Return value */
-
-    FUNC_ENTER_NOAPI(FAIL)
-
-    /* Sanity check */
-    assert(entry_ptr);
-    cache_ptr = entry_ptr->cache_ptr;
-    assert(cache_ptr);
-    assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
-        H5C__validate_lru_list(cache_ptr) < 0)
-        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-    /* Unpin the entry */
-    if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0)
-        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
-        H5C__validate_lru_list(cache_ptr) < 0)
-        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_unpin_entry() */
-
-/*-------------------------------------------------------------------------
- * Function:    H5C_unprotect
- *
- * Purpose:     Undo an H5C_protect() call -- specifically, mark the
- *              entry as unprotected, remove it from the protected list,
- *              and give it back to the replacement policy.
- *
- *              The TYPE and ADDR arguments must be the same as those in
- *              the corresponding call to H5C_protect() and the THING
- *              argument must be the value returned by that call to
- *              H5C_protect().
- *
- *              If the deleted flag is TRUE, simply remove the target entry
- *              from the cache, clear it, and free it without writing it to
- *              disk. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/2/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) -{ - H5C_t *cache_ptr; - hbool_t deleted; - hbool_t dirtied; - hbool_t set_flush_marker; - hbool_t pin_entry; - hbool_t unpin_entry; - hbool_t free_file_space; - hbool_t take_ownership; - hbool_t was_clean; -#ifdef H5_HAVE_PARALLEL - hbool_t clear_entry = FALSE; -#endif /* H5_HAVE_PARALLEL */ - H5C_cache_entry_t *entry_ptr; - H5C_cache_entry_t *test_entry_ptr; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - deleted = ((flags & H5C__DELETED_FLAG) != 0); - dirtied = ((flags & H5C__DIRTIED_FLAG) != 0); - set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); - pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0); - unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0); - free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); - take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); - - assert(f); - assert(f->shared); - - cache_ptr = f->shared->cache; - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(H5_addr_defined(addr)); - assert(thing); - assert(!(pin_entry && unpin_entry)); - - /* deleted flag must accompany free_file_space */ - assert((!free_file_space) || (deleted)); - - /* deleted flag must accompany take_ownership */ - assert((!take_ownership) || (deleted)); - - /* can't have both free_file_space & take_ownership */ - assert(!(free_file_space && take_ownership)); - - entry_ptr = (H5C_cache_entry_t *)thing; - assert(entry_ptr->addr == addr); - - /* also set the dirtied variable if the dirtied field is set in - * the entry. - */ - dirtied |= entry_ptr->dirtied; - was_clean = !(entry_ptr->is_dirty); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* if the entry has multiple read only protects, just decrement - * the ro_ref_counter. Don't actually unprotect until the ref count - * drops to zero. - */ - if (entry_ptr->ro_ref_count > 1) { - /* Sanity check */ - assert(entry_ptr->is_protected); - assert(entry_ptr->is_read_only); - - if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") - - /* Reduce the RO ref count */ - (entry_ptr->ro_ref_count)--; - - /* Pin or unpin the entry as requested. 
*/ - if (pin_entry) { - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - } - else if (unpin_entry) { - /* Unpin the entry from a client */ - if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - } /* end if */ - } - else { - if (entry_ptr->is_read_only) { - /* Sanity check */ - assert(entry_ptr->ro_ref_count == 1); - - if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") - - entry_ptr->is_read_only = FALSE; - entry_ptr->ro_ref_count = 0; - } /* end if */ - -#ifdef H5_HAVE_PARALLEL - /* When the H5C code is used to implement the metadata cache in the - * PHDF5 case, only the cache on process 0 is allowed to write to file. - * All the other metadata caches must hold dirty entries until they - * are told that the entries are clean. - * - * The clear_on_unprotect flag in the H5C_cache_entry_t structure - * exists to deal with the case in which an entry is protected when - * its cache receives word that the entry is now clean. In this case, - * the clear_on_unprotect flag is set, and the entry is flushed with - * the H5C__FLUSH_CLEAR_ONLY_FLAG. - * - * All this is a bit awkward, but until the metadata cache entries - * are contiguous, with only one dirty flag, we have to let the supplied - * functions deal with the resetting the is_dirty flag. - */ - if (entry_ptr->clear_on_unprotect) { - /* Sanity check */ - assert(entry_ptr->is_dirty); - - entry_ptr->clear_on_unprotect = FALSE; - if (!dirtied) - clear_entry = TRUE; - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") - - /* Mark the entry as dirty if appropriate */ - entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); - if (dirtied && entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Check for newly dirtied entry */ - if (was_clean && entry_ptr->is_dirty) { - /* Update index for newly dirtied entry */ - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) - - /* If the entry's type has a 'notify' callback send a - * 'entry dirtied' notice now that the entry is fully - * integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the flush dep dirty flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - /* Check for newly clean entry */ - else if (!was_clean && !entry_ptr->is_dirty) { - - /* If the entry's type has a 'notify' callback send a - * 'entry cleaned' notice now that the entry is fully - * integrated into the cache. 
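/* A sketch of the flag decoding at the top of H5C_unprotect above:
 * each behavior is an independent bit in `flags`.  The bit values
 * below are made up for illustration; the real H5C__*_FLAG values
 * live in the cache-private headers.
 */
#include <stdbool.h>

#define SKETCH_DELETED_FLAG 0x0001u
#define SKETCH_DIRTIED_FLAG 0x0002u
#define SKETCH_PIN_FLAG     0x0004u
#define SKETCH_UNPIN_FLAG   0x0008u

static void
sketch_decode_unprotect_flags(unsigned flags, bool *deleted, bool *dirtied,
                              bool *pin_entry, bool *unpin_entry)
{
    *deleted     = (flags & SKETCH_DELETED_FLAG) != 0;
    *dirtied     = (flags & SKETCH_DIRTIED_FLAG) != 0;
    *pin_entry   = (flags & SKETCH_PIN_FLAG) != 0;
    *unpin_entry = (flags & SKETCH_UNPIN_FLAG) != 0;
    /* as in the real code, pin and unpin in one call is a caller error */
}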
- */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the flush dep clean flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end else-if */ - - /* Pin or unpin the entry as requested. */ - if (pin_entry) { - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - } - else if (unpin_entry) { - /* Unpin the entry from a client */ - if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - } /* end if */ - - /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on - * the pinned entry list if entry_ptr->is_pinned is TRUE. - */ - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) - - entry_ptr->is_protected = FALSE; - - /* if the entry is dirty, 'or' its flush_marker with the set flush flag, - * and then add it to the skip list if it isn't there already. - */ - if (entry_ptr->is_dirty) { - entry_ptr->flush_marker |= set_flush_marker; - if (!entry_ptr->in_slist) - /* this is a no-op if cache_ptr->slist_enabled is FALSE */ - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - } /* end if */ - - /* This implementation of the "deleted" option is a bit inefficient, as - * we re-insert the entry to be deleted into the replacement policy - * data structures, only to remove them again. Depending on how often - * we do this, we may want to optimize a bit. - */ - if (deleted) { - unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG); - - /* verify that the target entry is in the cache. */ - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, - "hash table contains multiple entries for addr?!?") - - /* Set the 'free file space' flag for the flush, if needed */ - if (free_file_space) - flush_flags |= H5C__FREE_FILE_SPACE_FLAG; - - /* Set the "take ownership" flag for the flush, if needed */ - if (take_ownership) - flush_flags |= H5C__TAKE_OWNERSHIP_FLAG; - - /* Delete the entry from the skip list on destroy */ - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - - assert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist))); - - if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry") - } /* end if */ -#ifdef H5_HAVE_PARALLEL - else if (clear_entry) { - /* Verify that the target entry is in the cache. 
*/ - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, - "hash table contains multiple entries for addr?!?") - - if (H5C__flush_single_entry(f, entry_ptr, - H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry") - } /* end else if */ -#endif /* H5_HAVE_PARALLEL */ - } - - H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unprotect() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_unsettle_entry_ring - * - * Purpose: Advise the metadata cache that the specified entry's free space - * manager ring is no longer settled (if it was on entry). - * - * If the target free space manager ring is already - * unsettled, do nothing, and return SUCCEED. - * - * If the target free space manager ring is settled, and - * we are not in the process of a file shutdown, mark - * the ring as unsettled, and return SUCCEED. - * - * If the target free space manager is settled, and we - * are in the process of a file shutdown, post an error - * message, and return FAIL. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * January 3, 2017 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unsettle_entry_ring(void *_entry) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */ - H5C_t *cache; /* Cache for file */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(entry); - assert(entry->ring != H5C_RING_UNDEFINED); - assert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || - (H5C_RING_MDFSM == entry->ring)); - cache = entry->cache_ptr; - assert(cache); - assert(cache->magic == H5C__H5C_T_MAGIC); - - switch (entry->ring) { - case H5C_RING_USER: - /* Do nothing */ - break; - - case H5C_RING_RDFSM: - if (cache->rdfsm_settled) { - if (cache->flush_in_progress || cache->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") - cache->rdfsm_settled = FALSE; - } /* end if */ - break; - - case H5C_RING_MDFSM: - if (cache->mdfsm_settled) { - if (cache->flush_in_progress || cache->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") - cache->mdfsm_settled = FALSE; - } /* end if */ - break; - - default: - assert(FALSE); /* this should be un-reachable */ - break; - } /* end switch */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unsettle_entry_ring() */ - -/*------------------------------------------------------------------------- - * Function: H5C_unsettle_ring() - * - * Purpose: Advise the metadata cache that the specified free space - * manager ring is no longer settled (if it was on entry). - * - * If the target free space manager ring is already - * unsettled, do nothing, and return SUCCEED. 
- * - * If the target free space manager ring is settled, and - * we are not in the process of a file shutdown, mark - * the ring as unsettled, and return SUCCEED. - * - * If the target free space manager is settled, and we - * are in the process of a file shutdown, post an error - * message, and return FAIL. - * - * Return: Non-negative on success/Negative on failure - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring) -{ - H5C_t *cache_ptr; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(f); - assert(f->shared); - assert(f->shared->cache); - assert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring)); - cache_ptr = f->shared->cache; - assert(H5C__H5C_T_MAGIC == cache_ptr->magic); - - switch (ring) { - case H5C_RING_RDFSM: - if (cache_ptr->rdfsm_settled) { - if (cache_ptr->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") - cache_ptr->rdfsm_settled = FALSE; - } /* end if */ - break; - - case H5C_RING_MDFSM: - if (cache_ptr->mdfsm_settled) { - if (cache_ptr->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") - cache_ptr->mdfsm_settled = FALSE; - } /* end if */ - break; - - default: - assert(FALSE); /* this should be un-reachable */ - break; - } /* end switch */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unsettle_ring() */ - -/*------------------------------------------------------------------------- - * Function: H5C_validate_resize_config() - * - * Purpose: Run a sanity check on the specified sections of the - * provided instance of struct H5C_auto_size_ctl_t. - * - * Do nothing and return SUCCEED if no errors are detected, - * and flag an error and return FAIL otherwise. 
- * - * Return: Non-negative on success/Negative on failure - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if (config_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") - - if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version") - - if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) { - if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big") - if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small") - if (config_ptr->min_size > config_ptr->max_size) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size") - if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) || - (config_ptr->initial_size > config_ptr->max_size))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "initial_size must be in the interval [min_size, max_size]") - if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]") - if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small") - if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big") - } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) { - if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode") - - if (config_ptr->incr_mode == H5C_incr__threshold) { - if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "lower_hr_threshold must be in the range [0.0, 1.0]") - if (config_ptr->increment < 1.0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0") - - /* no need to check max_increment, as it is a size_t, - * and thus must be non-negative. 
- */ - } /* H5C_incr__threshold */ - - switch (config_ptr->flash_incr_mode) { - case H5C_flash_incr__off: - /* nothing to do here */ - break; - - case H5C_flash_incr__add_space: - if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "flash_multiple must be in the range [0.1, 10.0]") - if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "flash_threshold must be in the range [0.1, 1.0]") - break; - - default: - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode") - break; - } /* end switch */ - } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { - if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && - (config_ptr->decr_mode != H5C_decr__age_out) && - (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode") - - if (config_ptr->decr_mode == H5C_decr__threshold) { - if (config_ptr->upper_hr_threshold > 1.0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0") - if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]") - - /* no need to check max_decrement as it is a size_t - * and thus must be non-negative. - */ - } /* H5C_decr__threshold */ - - if ((config_ptr->decr_mode == H5C_decr__age_out) || - (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { - if (config_ptr->epochs_before_eviction < 1) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive") - if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big") - if (config_ptr->apply_empty_reserve && - (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]") - - /* no need to check max_decrement as it is a size_t - * and thus must be non-negative. - */ - } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ - - if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) - if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "upper_hr_threshold must be in the interval [0.0, 1.0]") - } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { - if ((config_ptr->incr_mode == H5C_incr__threshold) && - ((config_ptr->decr_mode == H5C_decr__threshold) || - (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) && - (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config") - } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_validate_resize_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_create_flush_dependency() - * - * Purpose: Initiates a parent<->child entry flush dependency. The parent - * entry must be pinned or protected at the time of call, and must - * have all dependencies removed before the cache can shut down. 
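/* A reduced sketch of the range checks H5C_validate_resize_config
 * performs above, covering a few representative fields.  The struct is
 * illustrative, not the real H5C_auto_size_ctl_t.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct sketch_resize_cfg_t {
    size_t min_size;
    size_t max_size;
    double min_clean_fraction; /* must lie in [0.0, 1.0] */
    double flash_multiple;     /* must lie in [0.1, 10.0] */
} sketch_resize_cfg_t;

static bool
sketch_validate_resize_cfg(const sketch_resize_cfg_t *cfg)
{
    if (cfg == NULL)
        return false;
    if (cfg->min_size > cfg->max_size)
        return false;
    if (cfg->min_clean_fraction < 0.0 || cfg->min_clean_fraction > 1.0)
        return false;
    if (cfg->flash_multiple < 0.1 || cfg->flash_multiple > 10.0)
        return false;
    return true;
}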
- *
- * Note: Flush dependencies in the cache indicate that a child entry
- * must be flushed to the file before its parent. (This is
- * currently used to implement Single-Writer/Multiple-Reader (SWMR)
- * I/O access for data structures in the file).
- *
- * Creating a flush dependency between two entries will also pin
- * the parent entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_create_flush_dependency(void *parent_thing, void *child_thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- assert(parent_entry);
- assert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(H5_addr_defined(parent_entry->addr));
- assert(child_entry);
- assert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(H5_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(cache_ptr == child_entry->cache_ptr);
-#ifndef NDEBUG
- /* Make sure the parent is not already a parent */
- {
- unsigned u;
-
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- assert(child_entry->flush_dep_parent[u] != parent_entry);
- } /* end block */
-#endif /* NDEBUG */
-
- /* More sanity checks */
- if (child_entry == parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
- if (!(parent_entry->is_protected || parent_entry->is_pinned))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
-
- /* Check for parent not pinned */
- if (!parent_entry->is_pinned) {
- /* Sanity check */
- assert(parent_entry->flush_dep_nchildren == 0);
- assert(!parent_entry->pinned_from_client);
- assert(!parent_entry->pinned_from_cache);
-
- /* Pin the parent entry */
- parent_entry->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
- } /* end if */
-
- /* Mark the entry as pinned from the cache's action (possibly redundantly) */
- parent_entry->pinned_from_cache = TRUE;
-
- /* Check if we need to resize the child's parent array */
- if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
- if (child_entry->flush_dep_parent_nalloc == 0) {
- /* Array does not exist yet, allocate it */
- assert(!child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
- } /* end if */
- else {
- /* Resize existing array */
- assert(child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- 2 * child_entry->flush_dep_parent_nalloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc *= 2;
- } /* end else */
- cache_ptr->entry_fd_height_change_counter++;
- } /* end if */
-
- /* Add the dependency to the child's parent array */
- child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
- child_entry->flush_dep_nparents++;
-
- /* Increment parent's number of children */
- parent_entry->flush_dep_nchildren++;
-
- /* Adjust the number of dirty children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- assert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_ndirty_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end if */
-
- /* adjust the parent's number of unserialized children. Note
- * that it is possible for an entry to be clean and unserialized.
- */
- if (!child_entry->image_up_to_date) {
- assert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end if */
-
- /* Post-conditions, for successful operation */
- assert(parent_entry->is_pinned);
- assert(parent_entry->flush_dep_nchildren > 0);
- assert(child_entry->flush_dep_parent);
- assert(child_entry->flush_dep_nparents > 0);
- assert(child_entry->flush_dep_parent_nalloc > 0);
-#ifndef NDEBUG
- H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
-#endif /* NDEBUG */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_create_flush_dependency() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C_destroy_flush_dependency()
- *
- * Purpose: Terminates a parent<->child entry flush dependency. The
- * parent entry must be pinned.
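
The allocation policy just shown (double the child's parent array on demand; on removal, shrink it by a factor of four once occupancy drops to a quarter, never below the initial size) is easier to see in isolation. A sketch under illustrative names, with plain realloc() standing in for the H5FL free-list allocators used above:

    #include <stdlib.h>

    #define PARENT_INIT 8 /* stand-in for H5C_FLUSH_DEP_PARENT_INIT */

    typedef struct {
        void   **parents;
        unsigned nparents;
        unsigned nalloc;
    } parent_list_t;

    static int
    parent_list_add(parent_list_t *pl, void *parent)
    {
        if (pl->nparents >= pl->nalloc) {
            unsigned new_nalloc = pl->nalloc ? 2 * pl->nalloc : PARENT_INIT;
            void   **tmp        = realloc(pl->parents, new_nalloc * sizeof(*tmp));

            if (!tmp)
                return -1;
            pl->parents = tmp;
            pl->nalloc  = new_nalloc;
        }
        pl->parents[pl->nparents++] = parent;
        return 0;
    }

    static void
    parent_list_remove(parent_list_t *pl, unsigned idx)
    {
        /* compact the array over the removed slot (cf. the HDmemmove
         * compaction in H5C_destroy_flush_dependency() below) */
        for (unsigned u = idx; u + 1 < pl->nparents; u++)
            pl->parents[u] = pl->parents[u + 1];
        pl->nparents--;

        if (pl->nparents == 0) {
            free(pl->parents);
            pl->parents = NULL;
            pl->nalloc  = 0;
        }
        else if (pl->nalloc > PARENT_INIT && pl->nparents <= pl->nalloc / 4) {
            void **tmp = realloc(pl->parents, (pl->nalloc / 4) * sizeof(*tmp));

            if (tmp) { /* shrinking is best-effort */
                pl->parents = tmp;
                pl->nalloc /= 4;
            }
        }
    }
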
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/05/09 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_destroy_flush_dependency(void *parent_thing, void *child_thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */ - H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(parent_entry); - assert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(H5_addr_defined(parent_entry->addr)); - assert(child_entry); - assert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(H5_addr_defined(child_entry->addr)); - cache_ptr = parent_entry->cache_ptr; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(cache_ptr == child_entry->cache_ptr); - - /* Usage checks */ - if (!parent_entry->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned") - if (NULL == child_entry->flush_dep_parent) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Child entry doesn't have a flush dependency parent array") - if (0 == parent_entry->flush_dep_nchildren) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Parent entry flush dependency ref. count has no child dependencies") - - /* Search for parent in child's parent array. This is a linear search - * because we do not expect large numbers of parents. If this changes, we - * may wish to change the parent array to a skip list */ - for (u = 0; u < child_entry->flush_dep_nparents; u++) - if (child_entry->flush_dep_parent[u] == parent_entry) - break; - if (u == child_entry->flush_dep_nparents) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Parent entry isn't a flush dependency parent for child entry") - - /* Remove parent entry from child's parent array */ - if (u < (child_entry->flush_dep_nparents - 1)) - HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1], - (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0])); - child_entry->flush_dep_nparents--; - - /* Adjust parent entry's nchildren and unpin parent if it goes to zero */ - parent_entry->flush_dep_nchildren--; - if (0 == parent_entry->flush_dep_nchildren) { - /* Sanity check */ - assert(parent_entry->pinned_from_cache); - - /* Check if we should unpin parent entry now */ - if (!parent_entry->pinned_from_client) - if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry") - - /* Mark the entry as unpinned from the cache's action */ - parent_entry->pinned_from_cache = FALSE; - } /* end if */ - - /* Adjust parent entry's ndirty_children */ - if (child_entry->is_dirty) { - /* Sanity check */ - assert(parent_entry->flush_dep_ndirty_children > 0); - - parent_entry->flush_dep_ndirty_children--; - - /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ - if (parent_entry->type->notify && - (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag reset") - } /* end if */ - - /* adjust parent entry's number of unserialized children */ - if (!child_entry->image_up_to_date) { - 
assert(parent_entry->flush_dep_nunser_children > 0); - - parent_entry->flush_dep_nunser_children--; - - /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ - if (parent_entry->type->notify && - (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry serialized flag set") - } /* end if */ - - /* Shrink or free the parent array if appropriate */ - if (child_entry->flush_dep_nparents == 0) { - child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent); - child_entry->flush_dep_parent_nalloc = 0; - } /* end if */ - else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT && - child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) { - if (NULL == (child_entry->flush_dep_parent = - H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, - child_entry->flush_dep_parent_nalloc / 4))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for flush dependency parent list") - child_entry->flush_dep_parent_nalloc /= 4; - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_destroy_flush_dependency() */ - -/*************************************************************************/ -/**************************** Private Functions: *************************/ -/*************************************************************************/ - -/*------------------------------------------------------------------------- - * Function: H5C__pin_entry_from_client() - * - * Purpose: Internal routine to pin a cache entry from a client action. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/26/09 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__pin_entry_from_client(H5C_t -#if !H5C_COLLECT_CACHE_STATS - H5_ATTR_UNUSED -#endif - *cache_ptr, - H5C_cache_entry_t *entry_ptr) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - assert(cache_ptr); - assert(entry_ptr); - assert(entry_ptr->is_protected); - - /* Check if the entry is already pinned */ - if (entry_ptr->is_pinned) { - /* Check if the entry was pinned through an explicit pin from a client */ - if (entry_ptr->pinned_from_client) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned") - } /* end if */ - else { - entry_ptr->is_pinned = TRUE; - - H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) - } /* end else */ - - /* Mark that the entry was pinned through an explicit pin from a client */ - entry_ptr->pinned_from_client = TRUE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__pin_entry_from_client() */ - -/*------------------------------------------------------------------------- - * Function: H5C__unpin_entry_real() - * - * Purpose: Internal routine to unpin a cache entry. 
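
Worth making explicit for the pin/unpin routines here: is_pinned is effectively the OR of two independent pin sources, the explicit client pin (pinned_from_client) and the cache-internal pin taken for flush dependency parents (pinned_from_cache); an entry stays physically pinned until both are released. A tiny model of the release rule, with hypothetical names:

    #include <stdbool.h>

    typedef struct {
        bool is_pinned;          /* physical pin state */
        bool pinned_from_client; /* client asked for a pin */
        bool pinned_from_cache;  /* entry is a flush dependency parent */
    } pin_state_t;

    static void
    release_client_pin(pin_state_t *e)
    {
        e->pinned_from_client = false;
        /* stay pinned while the cache itself still needs the entry */
        e->is_pinned = e->pinned_from_client || e->pinned_from_cache;
    }
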
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 1/6/18 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) -{ - herr_t ret_value = SUCCEED; /* Return value */ - -#ifdef H5C_DO_SANITY_CHECKS - FUNC_ENTER_PACKAGE -#else - FUNC_ENTER_PACKAGE_NOERR -#endif - - /* Sanity checking */ - assert(cache_ptr); - assert(entry_ptr); - assert(entry_ptr->is_pinned); - - /* If requested, update the replacement policy if the entry is not protected */ - if (update_rp && !entry_ptr->is_protected) - H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL) - - /* Unpin the entry now */ - entry_ptr->is_pinned = FALSE; - - /* Update the stats for an unpin operation */ - H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) - -#ifdef H5C_DO_SANITY_CHECKS -done: -#endif - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__unpin_entry_real() */ - -/*------------------------------------------------------------------------- - * Function: H5C__unpin_entry_from_client() - * - * Purpose: Internal routine to unpin a cache entry from a client action. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/24/09 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checking */ - assert(cache_ptr); - assert(entry_ptr); - - /* Error checking (should be sanity checks?) */ - if (!entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned") - if (!entry_ptr->pinned_from_client) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client") - - /* Check if the entry is not pinned from a flush dependency */ - if (!entry_ptr->pinned_from_cache) - if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry") - - /* Mark the entry as explicitly unpinned by the client */ - entry_ptr->pinned_from_client = FALSE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__unpin_entry_from_client() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__auto_adjust_cache_size - * - * Purpose: Obtain the current full cache hit rate, and compare it - * with the hit rate thresholds for modifying cache size. - * If one of the thresholds has been crossed, adjusts the - * size of the cache accordingly. - * - * The function then resets the full cache hit rate - * statistics, and exits. - * - * Return: Non-negative on success/Negative on failure or if there was - * an attempt to flush a protected item. 
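
The increase arm of the algorithm described above scales the current maximum size by the configured increment, then clips the result, first to the absolute maximum size and then to the maximum single-step increment. That arithmetic in isolation (hypothetical function name, same clipping order as the code below):

    #include <stdbool.h>
    #include <stddef.h>

    static size_t
    next_max_cache_size(size_t cur_max, double increment, size_t abs_max,
                        bool apply_max_increment, size_t max_increment)
    {
        size_t new_max = (size_t)((double)cur_max * increment);

        if (new_max > abs_max) /* clip to max size if necessary */
            new_max = abs_max;
        if (apply_max_increment && cur_max + max_increment < new_max)
            new_max = cur_max + max_increment; /* clip to max increment */
        return new_max;
    }
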
- *
- *
- * Programmer: John Mainzer, 10/7/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
- hbool_t inserted_epoch_marker = FALSE;
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- double hit_rate;
- enum H5C_resize_status status = in_spec; /* will change if needed */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- assert(f);
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length);
- assert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction);
- assert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0);
-
- /* check to see if cache_ptr->resize_in_progress is TRUE. If so, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if (cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
-
- cache_ptr->resize_in_progress = TRUE;
-
- if (!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
-
- assert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
- (cache_ptr->resize_ctl.decr_mode != H5C_decr__off));
-
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
-
- assert((0.0 <= hit_rate) && (hit_rate <= 1.0));
-
- switch (cache_ptr->resize_ctl.incr_mode) {
- case H5C_incr__off:
- if (cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
- break;
-
- case H5C_incr__threshold:
- if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) {
- if (!cache_ptr->size_increase_possible)
- status = increase_disabled;
- else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) {
- assert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size);
- status = at_max_size;
- }
- else if (!cache_ptr->cache_full)
- status = not_full;
- else {
- new_max_cache_size =
- (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment);
-
- /* clip to max size if necessary */
- if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
- new_max_cache_size = cache_ptr->resize_ctl.max_size;
-
- /* clip to max increment if necessary */
- if (cache_ptr->resize_ctl.apply_max_increment &&
- ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) <
- new_max_cache_size))
- new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment;
-
- status = increase;
- }
- }
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
-
- /* If the decr_mode is either age out or age out with threshold, we
- * must run the marker maintenance code, whether we run the size
- * reduction code or not. We do this in two places -- here we
- * insert a new marker if the number of active epoch markers
- * is less than the current epochs before eviction, and after
- * the ageout call, we cycle the markers.
- *
- * However, we can't call the ageout code or cycle the markers
- * unless there was a full complement of markers in place on
- * entry. The inserted_epoch_marker flag is used to track this.
- */
-
- if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
- (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
- (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) {
-
- if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
-
- inserted_epoch_marker = TRUE;
- }
-
- /* don't run the cache size decrease code unless the cache size
- * increase code is disabled, or the size increase code sees no need
- * for action. In either case, status == in_spec at this point.
- */
-
- if (status == in_spec) {
- switch (cache_ptr->resize_ctl.decr_mode) {
- case H5C_decr__off:
- break;
-
- case H5C_decr__threshold:
- if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) {
- assert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size);
- status = at_min_size;
- }
- else {
- new_max_cache_size =
- (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement);
-
- /* clip to min size if necessary */
- if (new_max_cache_size < cache_ptr->resize_ctl.min_size)
- new_max_cache_size = cache_ptr->resize_ctl.min_size;
-
- /* clip to max decrement if necessary */
- if (cache_ptr->resize_ctl.apply_max_decrement &&
- ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) <
- cache_ptr->max_cache_size))
- new_max_cache_size =
- cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement;
-
- status = decrease;
- }
- }
- break;
-
- case H5C_decr__age_out_with_threshold:
- case H5C_decr__age_out:
- if (!inserted_epoch_marker) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else {
- if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
- write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
- } /* end else */
- } /* end if */
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown decr_mode")
- }
- }
-
- /* cycle the epoch markers here if appropriate */
- if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
- (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
- !inserted_epoch_marker)
- /* move last epoch marker to the head of the LRU list */
- if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
-
- if ((status == increase) || (status == decrease)) {
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
-
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction));
-
- /* new_min_clean_size is of type size_t, and thus must be non-negative.
- * Hence we have
- *
- * ( 0 <= new_min_clean_size ).
- *
- * by definition.
- */ - assert(new_min_clean_size <= new_max_cache_size); - assert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); - assert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - - cache_ptr->max_cache_size = new_max_cache_size; - cache_ptr->min_clean_size = new_min_clean_size; - - if (status == increase) - cache_ptr->cache_full = FALSE; - else if (status == decrease) - cache_ptr->size_decreased = TRUE; - - /* update flash cache size increase fields as appropriate */ - if (cache_ptr->flash_size_increase_possible) { - switch (cache_ptr->resize_ctl.flash_incr_mode) { - case H5C_flash_incr__off: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flash_size_increase_possible but H5C_flash_incr__off?!") - break; - - case H5C_flash_incr__add_space: - cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * - (cache_ptr->resize_ctl.flash_threshold)); - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") - break; - } - } - } - - if (cache_ptr->resize_ctl.rpt_fcn != NULL) - (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, - old_max_cache_size, new_max_cache_size, old_min_clean_size, - new_min_clean_size); - - if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) - /* this should be impossible... */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - -done: - /* Sanity checks */ - assert(cache_ptr->resize_in_progress); - if (!reentrant_call) - cache_ptr->resize_in_progress = FALSE; - assert((!reentrant_call) || (cache_ptr->resize_in_progress)); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__auto_adjust_cache_size() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout - * - * Purpose: Implement the ageout automatic cache size decrement - * algorithm. Note that while this code evicts aged out - * entries, the code does not change the maximum cache size. - * Instead, the function simply computes the new value (if - * any change is indicated) and reports this value in - * *new_max_cache_size_ptr. - * - * Return: Non-negative on success/Negative on failure or if there was - * an attempt to flush a protected item. - * - * - * Programmer: John Mainzer, 11/18/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, - size_t *new_max_cache_size_ptr, hbool_t write_permitted) -{ - H5C_t *cache_ptr = f->shared->cache; - size_t test_size; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(f); - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert((status_ptr) && (*status_ptr == in_spec)); - assert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); - - /* remove excess epoch markers if any */ - if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) - if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") - - if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || - ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) && - (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) { - - if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) { - /* evict aged out cache entries if appropriate... 
*/ - if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries") - - /* ... and then reduce cache size if appropriate */ - if (cache_ptr->index_size < cache_ptr->max_cache_size) { - if (cache_ptr->resize_ctl.apply_empty_reserve) { - test_size = - (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve)); - if (test_size < cache_ptr->max_cache_size) { - *status_ptr = decrease; - *new_max_cache_size_ptr = test_size; - } - } - else { - *status_ptr = decrease; - *new_max_cache_size_ptr = cache_ptr->index_size; - } - - if (*status_ptr == decrease) { - /* clip to min size if necessary */ - if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size) - *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size; - - /* clip to max decrement if necessary */ - if ((cache_ptr->resize_ctl.apply_max_decrement) && - ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) < - cache_ptr->max_cache_size)) - *new_max_cache_size_ptr = - cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; - } - } - } - else - *status_ptr = at_min_size; - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__cycle_epoch_marker - * - * Purpose: Remove the oldest epoch marker from the LRU list, - * and reinsert it at the head of the LRU list. Also - * remove the epoch marker's index from the head of the - * ring buffer, and re-insert it at the tail of the ring - * buffer. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/22/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) -{ - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (cache_ptr->epoch_markers_active <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") - - /* remove the last marker from both the ring buffer and the LRU list */ - i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - - cache_ptr->epoch_marker_ringbuf_size -= 1; - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, - (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) - - /* now, re-insert it at the head of the LRU list, and at the tail of - * the ring buffer. 
- */
- assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
- assert(cache_ptr->epoch_markers[i].next == NULL);
- assert(cache_ptr->epoch_markers[i].prev == NULL);
-
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i;
- if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
-
- H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__evict_aged_out_entries
- *
- * Purpose: Evict clean entries in the cache that haven't
- * been accessed for at least
- * cache_ptr->resize_ctl.epochs_before_eviction epochs,
- * and flush dirty entries that haven't been accessed for
- * that amount of time.
- *
- * Depending on configuration, the function will either
- * flush or evict all such entries, or all such entries it
- * encounters until it has freed the maximum amount of space
- * allowed under the maximum decrement.
- *
- * If we are running in parallel mode, writes may not be
- * permitted. If so, the function simply skips any dirty
- * entries it may encounter.
- *
- * The function makes no attempt to maintain the minimum
- * clean size, as there is no guarantee that the cache size
- * will be changed.
- *
- * If there is no cache size change, the minimum clean size
- * constraint will be met through a combination of clean
- * entries and free space in the cache.
- *
- * If there is a cache size reduction, the minimum clean size
- * will be re-calculated, and will be enforced the next time
- * we have to make space in the cache.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- size_t eviction_size_limit;
- size_t bytes_evicted = 0;
- hbool_t prev_is_dirty = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *next_ptr;
- H5C_cache_entry_t *prev_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- assert(f);
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* if there is a limit on the amount that the cache size can be decreased
- * in any one round of the cache size reduction algorithm, load that
- * limit into eviction_size_limit. Otherwise, set eviction_size_limit
- * to the equivalent of infinity. The current size of the index will
- * do nicely.
- */
- if (cache_ptr->resize_ctl.apply_max_decrement)
- eviction_size_limit = cache_ptr->resize_ctl.max_decrement;
- else
- eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
-
- if (write_permitted) {
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID &&
- bytes_evicted < eviction_size_limit) {
- hbool_t skipping_entry = FALSE;
-
- assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(!(entry_ptr->is_protected));
- assert(!(entry_ptr->is_read_only));
- assert((entry_ptr->ro_ref_count) == 0);
-
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
-
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
-
- if (entry_ptr->is_dirty) {
- assert(!entry_ptr->prefetched_dirty);
-
- /* dirty corked entry is skipped */
- if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
- skipping_entry = TRUE;
- else {
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-
- if (cache_ptr->entries_removed_counter > 1 ||
- cache_ptr->last_entry_removed_ptr == prev_ptr)
- restart_scan = TRUE;
- } /* end else */
- } /* end if */
- else if (!entry_ptr->prefetched_dirty) {
- bytes_evicted += entry_ptr->size;
-
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end else-if */
- else {
- assert(!entry_ptr->is_dirty);
- assert(entry_ptr->prefetched_dirty);
-
- skipping_entry = TRUE;
- } /* end else */
-
- if (prev_ptr != NULL) {
- if (skipping_entry)
- entry_ptr = prev_ptr;
- else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
- entry_ptr = prev_ptr;
- } /* end if */
- else
- entry_ptr = NULL;
- } /* end while */
-
- /* for now at least, don't bother to maintain the minimum clean size,
- * as the cache should now be less than its maximum size. Due to
- * the vagaries of the cache size reduction algorithm, we may not
- * reduce the size of the cache.
- *
- * If we do, we will calculate a new minimum clean size, which will
- * be enforced the next time we try to make space in the cache.
- *
- * If we don't, no action is necessary, as we have just evicted and/or
- * flushed a bunch of entries and therefore the sum of the clean
- * and free space in the cache must be greater than or equal to the
- * min clean space requirement (assuming that requirement was met on
- * entry).
- */
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
- * any clean entries that we may encounter before we either
- * hit the eviction size limit, or encounter the epoch marker.
- *
- * If we are operating read only, this isn't an issue, as there
- * will not be any dirty entries.
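
The size target used by the ageout path above aims to leave empty_reserve of the new maximum free: the surviving index size is divided by (1 - empty_reserve) and the result clipped to the configured minimum. A sketch of just that computation (hypothetical name; the max-decrement clip applied by the caller is omitted):

    #include <stdbool.h>
    #include <stddef.h>

    static size_t
    ageout_size_target(size_t index_size, size_t cur_max, size_t min_size,
                       bool apply_empty_reserve, double empty_reserve)
    {
        size_t target = apply_empty_reserve
                            ? (size_t)((double)index_size / (1.0 - empty_reserve))
                            : index_size;

        if (target >= cur_max) /* no decrease indicated */
            return cur_max;
        return (target < min_size) ? min_size : target;
    }
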
- *
- * If we are operating in R/W mode, all the dirty entries we
- * skip will be flushed the next time we attempt to make space
- * when writes are permitted. This may have some local
- * performance implications, but it shouldn't cause any net
- * slowdown.
- */
- assert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- assert(!(entry_ptr->is_protected));
-
- prev_ptr = entry_ptr->prev;
-
- if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
-
- /* just skip the entry if it is dirty, as we can't do
- * anything with it now since we can't write.
- *
- * Since all entries are clean, serialize() will not be called,
- * and thus we needn't test to see if the LRU has been changed
- * out from under us.
- */
- entry_ptr = prev_ptr;
- } /* end while */
- } /* end else */
-
- if (cache_ptr->index_size < cache_ptr->max_cache_size)
- cache_ptr->cache_full = FALSE;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__insert_new_marker
- *
- * Purpose: Find an unused marker cache entry, mark it as used, and
- * insert it at the head of the LRU list. Also add the
- * marker's index in the epoch_markers array to the tail of
- * the ring buffer.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
-{
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
-
- /* find an unused marker */
- i = 0;
- while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
- i++;
- if (i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
-
- assert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- assert(((cache_ptr->epoch_markers)[i]).next == NULL);
- assert(((cache_ptr->epoch_markers)[i]).prev == NULL);
-
- (cache_ptr->epoch_marker_active)[i] = TRUE;
-
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
- if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
-
- H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-
- cache_ptr->epoch_markers_active += 1;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__insert_new_marker() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_all_markers
- *
- * Purpose: Remove all epoch markers
from the LRU list and mark them - * as inactive. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/22/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) -{ - int ring_buf_index; - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - while (cache_ptr->epoch_markers_active > 0) { - /* get the index of the last epoch marker in the LRU list - * and remove it from the ring buffer. - */ - - ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; - i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; - - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, - cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) - - /* mark the epoch marker as unused. */ - cache_ptr->epoch_marker_active[i] = FALSE; - - assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); - assert(cache_ptr->epoch_markers[i].next == NULL); - assert(cache_ptr->epoch_markers[i].prev == NULL); - - /* decrement the number of active epoch markers */ - cache_ptr->epoch_markers_active -= 1; - - assert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__remove_all_markers() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__remove_excess_markers - * - * Purpose: Remove epoch markers from the end of the LRU list and - * mark them as inactive until the number of active markers - * equals the current value of - * cache_ptr->resize_ctl.epochs_before_eviction. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/19/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) -{ - int ring_buf_index; - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") - - while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) { - /* get the index of the last epoch marker in the LRU list - * and remove it from the ring buffer. 
- */ - ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; - i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; - - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, - cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) - - /* mark the epoch marker as unused. */ - cache_ptr->epoch_marker_active[i] = FALSE; - - assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); - assert(cache_ptr->epoch_markers[i].next == NULL); - assert(cache_ptr->epoch_markers[i].prev == NULL); - - /* decrement the number of active epoch markers */ - cache_ptr->epoch_markers_active -= 1; - - assert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__remove_excess_markers() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flash_increase_cache_size - * - * Purpose: If there is not at least new_entry_size - old_entry_size - * bytes of free space in the cache and the current - * max_cache_size is less than cache_ptr->resize_ctl.max_size, - * perform a flash increase in the cache size and then reset - * the full cache hit rate statistics, and exit. - * - * Return: Non-negative on success/Negative on failure. - * - * Programmer: John Mainzer, 12/31/07 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size) -{ - size_t new_max_cache_size = 0; - size_t old_max_cache_size = 0; - size_t new_min_clean_size = 0; - size_t old_min_clean_size = 0; - size_t space_needed; - enum H5C_resize_status status = flash_increase; /* may change */ - double hit_rate; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(cache_ptr->flash_size_increase_possible); - assert(new_entry_size > cache_ptr->flash_size_increase_threshold); - assert(old_entry_size < new_entry_size); - - if (old_entry_size >= new_entry_size) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size") - - space_needed = new_entry_size - old_entry_size; - if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && - (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) { - switch (cache_ptr->resize_ctl.flash_incr_mode) { - case H5C_flash_incr__off: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flash_size_increase_possible but H5C_flash_incr__off?!") - break; - - case H5C_flash_incr__add_space: - if (cache_ptr->index_size < cache_ptr->max_cache_size) { - assert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed); - space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size; - } - space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple); - new_max_cache_size = cache_ptr->max_cache_size + space_needed; - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, 
"Unknown flash_incr_mode?!?!?") - break; - } - - if (new_max_cache_size > cache_ptr->resize_ctl.max_size) - new_max_cache_size = cache_ptr->resize_ctl.max_size; - assert(new_max_cache_size > cache_ptr->max_cache_size); - - new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction); - assert(new_min_clean_size <= new_max_cache_size); - - old_max_cache_size = cache_ptr->max_cache_size; - old_min_clean_size = cache_ptr->min_clean_size; - - cache_ptr->max_cache_size = new_max_cache_size; - cache_ptr->min_clean_size = new_min_clean_size; - - /* update flash cache size increase fields as appropriate */ - assert(cache_ptr->flash_size_increase_possible); - - switch (cache_ptr->resize_ctl.flash_incr_mode) { - case H5C_flash_incr__off: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flash_size_increase_possible but H5C_flash_incr__off?!") - break; - - case H5C_flash_incr__add_space: - cache_ptr->flash_size_increase_threshold = - (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold); - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") - break; - } - - /* note that we don't cycle the epoch markers. We can - * argue either way as to whether we should, but for now - * we don't. - */ - - if (cache_ptr->resize_ctl.rpt_fcn != NULL) { - /* get the hit rate for the reporting function. Should still - * be good as we haven't reset the hit rate statistics. - */ - if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") - - (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, - old_max_cache_size, new_max_cache_size, old_min_clean_size, - new_min_clean_size); - } - - if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) - /* this should be impossible... */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flash_increase_cache_size() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_invalidate_cache - * - * Purpose: Flush and destroy the entries contained in the target - * cache. - * - * If the cache contains protected entries, the function will - * fail, as protected entries cannot be either flushed or - * destroyed. However all unprotected entries should be - * flushed and destroyed before the function returns failure. - * - * While pinned entries can usually be flushed, they cannot - * be destroyed. However, they should be unpinned when all - * the entries that reference them have been destroyed (thus - * reduding the pinned entry's reference count to 0, allowing - * it to be unpinned). - * - * If pinned entries are present, the function makes repeated - * passes through the cache, flushing all dirty entries - * (including the pinned dirty entries where permitted) and - * destroying all unpinned entries. This process is repeated - * until either the cache is empty, or the number of pinned - * entries stops decreasing on each pass. - * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. 
- * - * Programmer: John Mainzer - * 3/24/05 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) -{ - H5C_t *cache_ptr; - H5C_ring_t ring; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - assert(f); - assert(f->shared); - cache_ptr = f->shared->cache; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(cache_ptr->slist_ptr); - assert(cache_ptr->slist_enabled); - -#ifdef H5C_DO_SANITY_CHECKS - { - int32_t i; - uint32_t index_len = 0; - uint32_t slist_len = 0; - size_t index_size = (size_t)0; - size_t clean_index_size = (size_t)0; - size_t dirty_index_size = (size_t)0; - size_t slist_size = (size_t)0; - - assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); - assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); - assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - - for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { - index_len += cache_ptr->index_ring_len[i]; - index_size += cache_ptr->index_ring_size[i]; - clean_index_size += cache_ptr->clean_index_ring_size[i]; - dirty_index_size += cache_ptr->dirty_index_ring_size[i]; - - slist_len += cache_ptr->slist_ring_len[i]; - slist_size += cache_ptr->slist_ring_size[i]; - } /* end for */ - - assert(cache_ptr->index_len == index_len); - assert(cache_ptr->index_size == index_size); - assert(cache_ptr->clean_index_size == clean_index_size); - assert(cache_ptr->dirty_index_size == dirty_index_size); - assert(cache_ptr->slist_len == slist_len); - assert(cache_ptr->slist_size == slist_size); - } -#endif /* H5C_DO_SANITY_CHECKS */ - - /* remove ageout markers if present */ - if (cache_ptr->epoch_markers_active > 0) - if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") - - /* flush invalidate each ring, starting from the outermost ring and - * working inward. 
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- if (H5C__flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Invariants, after destroying all entries in the hash table */
- if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- assert(cache_ptr->index_size == 0);
- assert(cache_ptr->clean_index_size == 0);
- assert(cache_ptr->pel_len == 0);
- assert(cache_ptr->pel_size == 0);
- } /* end if */
- else {
- H5C_cache_entry_t *entry_ptr; /* Cache entry */
- unsigned u; /* Local index variable */
-
- /* All rings except ring 4 should be empty now */
- /* (Ring 4 has the superblock) */
- for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- assert(cache_ptr->index_ring_len[u] == 0);
- assert(cache_ptr->index_ring_size[u] == 0);
- assert(cache_ptr->clean_index_ring_size[u] == 0);
- } /* end for */
-
- /* Check that any remaining pinned entries are in the superblock ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr) {
- /* Check ring */
- assert(entry_ptr->ring == H5C_RING_SB);
-
- /* Advance to next entry in pinned entry list */
- entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end else */
-
- assert(cache_ptr->dirty_index_size == 0);
- assert(cache_ptr->slist_len == 0);
- assert(cache_ptr->slist_size == 0);
- assert(cache_ptr->pl_len == 0);
- assert(cache_ptr->pl_size == 0);
- assert(cache_ptr->LRU_list_len == 0);
- assert(cache_ptr->LRU_list_size == 0);
-#endif /* NDEBUG */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_invalidate_cache() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__flush_invalidate_ring
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
- *
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
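
The repeated-pass rule in the purpose text above boils down to: keep making flush/destroy passes while the ring still holds entries, and give up if the pinned-entry count stops declining between passes. A schematic sketch, with hypothetical helpers standing in for the real flush machinery:

    #include <stdbool.h>

    typedef struct ring ring_t;                   /* hypothetical stand-ins */
    extern bool     ring_empty(const ring_t *);
    extern unsigned count_pinned(const ring_t *);
    extern int      flush_destroy_pass(ring_t *); /* one pass over the ring */

    static int
    invalidate_ring(ring_t *ring)
    {
        unsigned old_pel_len = count_pinned(ring);

        while (!ring_empty(ring)) {
            unsigned cur_pel_len;

            if (flush_destroy_pass(ring) < 0)
                return -1;

            cur_pel_len = count_pinned(ring);
            if (cur_pel_len > 0 && cur_pel_len >= old_pel_len)
                return -1; /* pinned count stopped declining -- give up */
            old_pel_len = cur_pel_len;
        }
        return 0;
    }
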
- * - * Programmer: John Mainzer - * 9/1/15 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) -{ - H5C_t *cache_ptr; - hbool_t restart_slist_scan; - uint32_t protected_entries = 0; - int32_t i; - uint32_t cur_ring_pel_len; - uint32_t old_ring_pel_len; - unsigned cooked_flags; - unsigned evict_flags; - H5SL_node_t *node_ptr = NULL; - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *next_entry_ptr = NULL; -#ifdef H5C_DO_SANITY_CHECKS - uint32_t initial_slist_len = 0; - size_t initial_slist_size = 0; -#endif /* H5C_DO_SANITY_CHECKS */ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - assert(f); - assert(f->shared); - - cache_ptr = f->shared->cache; - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(cache_ptr->slist_enabled); - assert(cache_ptr->slist_ptr); - assert(ring > H5C_RING_UNDEFINED); - assert(ring < H5C_RING_NTYPES); - - assert(cache_ptr->epoch_markers_active == 0); - - /* Filter out the flags that are not relevant to the flush/invalidate. - */ - cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; - evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG; - - /* The flush procedure here is a bit strange. - * - * In the outer while loop we make at least one pass through the - * cache, and then repeat until either all the pinned entries in - * the ring unpin themselves, or until the number of pinned entries - * in the ring stops declining. In this later case, we scream and die. - * - * Since the fractal heap can dirty, resize, and/or move entries - * in is flush callback, it is possible that the cache will still - * contain dirty entries at this point. If so, we must make more - * passes through the skip list to allow it to empty. - * - * Further, since clean entries can be dirtied, resized, and/or moved - * as the result of a flush call back (either the entries own, or that - * for some other cache entry), we can no longer promise to flush - * the cache entries in increasing address order. - * - * Instead, we make a pass through - * the skip list, and then a pass through the "clean" entries, and - * then repeating as needed. Thus it is quite possible that an - * entry will be evicted from the cache only to be re-loaded later - * in the flush process. - * - * The bottom line is that entries will probably be flushed in close - * to increasing address order, but there are no guarantees. - */ - - /* compute the number of pinned entries in this ring */ - entry_ptr = cache_ptr->pel_head_ptr; - cur_ring_pel_len = 0; - while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->ring >= ring); - if (entry_ptr->ring == ring) - cur_ring_pel_len++; - - entry_ptr = entry_ptr->next; - } /* end while */ - old_ring_pel_len = cur_ring_pel_len; - - while (cache_ptr->index_ring_len[ring] > 0) { - /* first, try to flush-destroy any dirty entries. Do this by - * making a scan through the slist. Note that new dirty entries - * may be created by the flush call backs. Thus it is possible - * that the slist will not be empty after we finish the scan. - */ - -#ifdef H5C_DO_SANITY_CHECKS - /* Depending on circumstances, H5C__flush_single_entry() will - * remove dirty entries from the slist as it flushes them. - * Thus for sanity checks we must make note of the initial - * slist length and size before we do any flushes. 
- */
-        initial_slist_len  = cache_ptr->slist_len;
-        initial_slist_size = cache_ptr->slist_size;
-
-        /* There is also the possibility that entries will be
-         * dirtied, resized, moved, and/or removed from the cache
-         * as the result of calls to the flush callbacks. We use
-         * the slist_len_increase and slist_size_increase
-         * fields in struct H5C_t to track these changes for purposes
-         * of sanity checking.
-         *
-         * To this end, we must zero these fields before we start
-         * the pass through the slist.
-         */
-        cache_ptr->slist_len_increase  = 0;
-        cache_ptr->slist_size_increase = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
-
-        /* Set cache_ptr->slist_changed to FALSE.
-         *
-         * This flag is set to TRUE by H5C__flush_single_entry if the slist
-         * is modified by a pre_serialize, serialize, or notify callback.
-         *
-         * H5C__flush_invalidate_ring() uses this flag to detect any
-         * modifications to the slist that might corrupt the scan of
-         * the slist -- and restart the scan in this event.
-         */
-        cache_ptr->slist_changed = FALSE;
-
-        /* this done, start the scan of the slist */
-        restart_slist_scan = TRUE;
-        while (restart_slist_scan || (node_ptr != NULL)) {
-            if (restart_slist_scan) {
-                restart_slist_scan = FALSE;
-
-                /* Start at beginning of skip list */
-                node_ptr = H5SL_first(cache_ptr->slist_ptr);
-                if (node_ptr == NULL)
-                    /* the slist is empty -- break out of inner loop */
-                    break;
-
-                /* Get cache entry for this node */
-                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
-                if (NULL == next_entry_ptr)
-                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
-                assert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-                assert(next_entry_ptr->is_dirty);
-                assert(next_entry_ptr->in_slist);
-                assert(next_entry_ptr->ring >= ring);
-            } /* end if */
-
-            entry_ptr = next_entry_ptr;
-
-            /* It is possible that entries will be dirtied, resized,
-             * flushed, or removed from the cache via the take ownership
-             * flag as the result of pre_serialize or serialize callbacks.
-             *
-             * This in turn can corrupt the scan through the slist.
-             *
-             * We test for slist modifications in the pre_serialize
-             * and serialize callbacks, and restart the scan of the
-             * slist if we find them. However, it is best to do some extra
-             * sanity checking just in case.
-             */
-            assert(entry_ptr != NULL);
-            assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-            assert(entry_ptr->in_slist);
-            assert(entry_ptr->is_dirty);
-            assert(entry_ptr->ring >= ring);
-
-            /* increment node pointer now, before we delete its target
-             * from the slist.
-             */
-            node_ptr = H5SL_next(node_ptr);
-            if (node_ptr != NULL) {
-                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
-                if (NULL == next_entry_ptr)
-                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
-                assert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-                assert(next_entry_ptr->is_dirty);
-                assert(next_entry_ptr->in_slist);
-                assert(next_entry_ptr->ring >= ring);
-                assert(entry_ptr != next_entry_ptr);
-            } /* end if */
-            else
-                next_entry_ptr = NULL;
-
-            /* Note that we now remove nodes from the slist as we flush
-             * the associated entries, instead of leaving them there
-             * until we are done, and then destroying all nodes in
-             * the slist.
-             *
-             * While this optimization used to be easy, with the possibility
-             * of new entries being added to the slist in the midst of the
-             * flush, we must keep the slist in canonical form at all
-             * times.
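The slist_changed / restart_slist_scan protocol above generalizes to any scan of a list that callbacks may mutate: capture the successor before visiting a node, and restart from the head whenever a visit reports a structural change. A self-contained sketch (the singly linked list and visit() are illustrative, not the H5SL API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct node {
        struct node *next;
        int          id;
    } node_t;

    static bool list_changed; /* set by a visit that restructures the list */

    /* Illustrative visit callback; a real flush may insert/remove nodes
     * and would then set list_changed = true */
    static void visit(node_t *n) { printf("visit %d\n", n->id); }

    static void scan(node_t *head) {
        bool    restart = true;
        node_t *cur     = NULL;

        while (restart || cur != NULL) {
            if (restart) {
                restart = false;
                cur     = head;       /* start over from the beginning */
                if (cur == NULL)
                    break;
            }

            node_t *next = cur->next; /* grab the successor before cur can vanish */

            list_changed = false;
            visit(cur);
            if (list_changed) {       /* scan may be corrupt -- restart it */
                restart = true;
                continue;
            }
            cur = next;
        }
    }

    int main(void) {
        node_t c = {NULL, 3}, b = {&c, 2}, a = {&b, 1};
        scan(&a);
        return 0;
    }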
- */
-            if (((!entry_ptr->flush_me_last) ||
-                 ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
-                (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
-                if (entry_ptr->is_protected) {
-                    /* We have major problems -- but let's flush
-                     * everything we can before we flag an error.
-                     */
-                    protected_entries++;
-                } /* end if */
-                else if (entry_ptr->is_pinned) {
-                    if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
-                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
-
-                    if (cache_ptr->slist_changed) {
-                        /* The slist has been modified by something
-                         * other than the simple removal of
-                         * the flushed entry after the flush.
-                         *
-                         * This has the potential to corrupt the
-                         * scan through the slist, so restart it.
-                         */
-                        restart_slist_scan       = TRUE;
-                        cache_ptr->slist_changed = FALSE;
-                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
-                    } /* end if */
-                } /* end else-if */
-                else {
-                    if (H5C__flush_single_entry(f, entry_ptr,
-                                                (cooked_flags | H5C__DURING_FLUSH_FLAG |
-                                                 H5C__FLUSH_INVALIDATE_FLAG |
-                                                 H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
-                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
-
-                    if (cache_ptr->slist_changed) {
-                        /* The slist has been modified by something
-                         * other than the simple removal of
-                         * the flushed entry after the flush.
-                         *
-                         * This has the potential to corrupt the
-                         * scan through the slist, so restart it.
-                         */
-                        restart_slist_scan       = TRUE;
-                        cache_ptr->slist_changed = FALSE;
-                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
-                    } /* end if */
-                } /* end else */
-            } /* end if */
-        } /* end while loop scanning skip list */
-
-#ifdef H5C_DO_SANITY_CHECKS
-        /* It is possible that entries were added to the slist during
-         * the scan, either before or after the scan pointer. The following
-         * asserts take this into account.
-         *
-         * Don't bother with the sanity checks if node_ptr != NULL, as
-         * in this case we broke out of the loop because it got changed
-         * out from under us.
-         */
-
-        if (node_ptr == NULL) {
-            assert(cache_ptr->slist_len ==
-                   (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
-            assert(cache_ptr->slist_size ==
-                   (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
-        } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
-
-        /* Since we are doing a destroy, we must make a pass through
-         * the hash table and try to flush - destroy all entries that
-         * remain.
-         *
-         * It used to be that all entries remaining in the cache at
-         * this point had to be clean, but with the fractal heap mods
-         * this may not be the case. If so, we will flush entries out
-         * in increasing address order.
-         *
-         * Writes to disk are possible here.
-         */
-
-        /* Reset the counters so that we can detect insertions, loads,
-         * and moves caused by the pre_serialize and serialize calls.
- */
-        cache_ptr->entries_loaded_counter    = 0;
-        cache_ptr->entries_inserted_counter  = 0;
-        cache_ptr->entries_relocated_counter = 0;
-
-        next_entry_ptr = cache_ptr->il_head;
-        while (next_entry_ptr != NULL) {
-            entry_ptr = next_entry_ptr;
-            assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-            assert(entry_ptr->ring >= ring);
-
-            next_entry_ptr = entry_ptr->il_next;
-            assert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
-
-            if (((!entry_ptr->flush_me_last) ||
-                 (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
-                (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
-
-                if (entry_ptr->is_protected) {
-                    /* we have major problems -- but let's flush and
-                     * destroy everything we can before we flag an
-                     * error.
-                     */
-                    protected_entries++;
-
-                    if (!entry_ptr->in_slist)
-                        assert(!(entry_ptr->is_dirty));
-                } /* end if */
-                else if (!entry_ptr->is_pinned) {
-                    /* if *entry_ptr is dirty, it is possible
-                     * that one or more other entries may be
-                     * either removed from the cache, loaded
-                     * into the cache, or moved to a new location
-                     * in the file as a side effect of the flush.
-                     *
-                     * It's also possible that removing a clean
-                     * entry will remove the last child of a proxy
-                     * entry, allowing it to be removed also and
-                     * invalidating the next_entry_ptr.
-                     *
-                     * If either of these happens, and one of the target
-                     * or proxy entries happens to be the next entry in
-                     * the hash bucket, we could find ourselves
-                     * scanning a non-existent entry, scanning
-                     * through a different bucket, or skipping an entry.
-                     *
-                     * None of these is good, so restart the
-                     * scan at the head of the hash bucket
-                     * after the flush if we detect that the next_entry_ptr
-                     * becomes invalid.
-                     *
-                     * This is not as inefficient as it might seem,
-                     * as hash buckets typically have at most two
-                     * or three entries.
-                     */
-                    cache_ptr->entry_watched_for_removal = next_entry_ptr;
-                    if (H5C__flush_single_entry(f, entry_ptr,
-                                                (cooked_flags | H5C__DURING_FLUSH_FLAG |
-                                                 H5C__FLUSH_INVALIDATE_FLAG |
-                                                 H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
-                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
-
-                    /* Restart the index list scan if necessary. Must
-                     * do this if the next entry is evicted, and also if
-                     * one or more entries are inserted, loaded, or moved
-                     * as these operations can result in part of the scan
-                     * being skipped -- which can cause a spurious failure
-                     * if this results in the size of the pinned entry
-                     * list failing to decline during the pass.
-                     */
-                    if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
-                        (cache_ptr->entries_loaded_counter > 0) ||
-                        (cache_ptr->entries_inserted_counter > 0) ||
-                        (cache_ptr->entries_relocated_counter > 0)) {
-
-                        next_entry_ptr = cache_ptr->il_head;
-
-                        cache_ptr->entries_loaded_counter    = 0;
-                        cache_ptr->entries_inserted_counter  = 0;
-                        cache_ptr->entries_relocated_counter = 0;
-
-                        H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
-                    } /* end if */
-                    else
-                        cache_ptr->entry_watched_for_removal = NULL;
-                } /* end if */
-            } /* end if */
-        } /* end for loop scanning hash table */
-
-        /* We can't do anything if entries are pinned. The
-         * hope is that the entries will be unpinned as the
-         * result of destroys of entries that reference them.
-         *
-         * We detect this by noting the change in the number
-         * of pinned entries from pass to pass. If it stops
-         * shrinking before it hits zero, we scream and die.
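The entry_watched_for_removal trick above is a cheap way to make an index scan safe against side-effect evictions: register the intended next entry as "watched" before the flush, and if the flush clears the pointer (because that entry was removed), restart the scan. A sketch under the same assumptions (the list, flush(), and on_remove() are illustrative stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct entry { struct entry *next; int id; } entry_t;

    static entry_t *watched; /* next entry we intend to visit */

    /* Any code that removes an entry must check the watched pointer */
    static void on_remove(entry_t *e) {
        if (e == watched)
            watched = NULL; /* tell the scanner its 'next' is gone */
    }

    /* Illustrative flush that happens to evict its argument's successor */
    static void flush(entry_t *e) {
        if (e->next != NULL) {
            on_remove(e->next);
            e->next = e->next->next;
        }
    }

    int main(void) {
        entry_t c = {NULL, 3}, b = {&c, 2}, head = {&b, 1};
        entry_t *cur = &head;

        while (cur != NULL) {
            entry_t *next = cur->next;

            watched = next;
            flush(cur);

            if (next != NULL && watched == NULL) {
                printf("next entry was removed -- restart scan\n");
                cur = &head; /* restart from the head, as the cache does */
            }
            else
                cur = next;
        }
        return 0;
    }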
- */ - old_ring_pel_len = cur_ring_pel_len; - entry_ptr = cache_ptr->pel_head_ptr; - cur_ring_pel_len = 0; - - while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->ring >= ring); - - if (entry_ptr->ring == ring) - cur_ring_pel_len++; - - entry_ptr = entry_ptr->next; - } /* end while */ - - /* Check if the number of pinned entries in the ring is positive, and - * it is not declining. Scream and die if so. - */ - if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) { - /* Don't error if allowed to have pinned entries remaining */ - if (evict_flags) - HGOTO_DONE(TRUE) - - HGOTO_ERROR( - H5E_CACHE, H5E_CANTFLUSH, FAIL, - "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", - (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) - } /* end if */ - - assert(protected_entries == cache_ptr->pl_len); - - if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, - "Only protected entries left in cache, protected_entries = %d", - (int)protected_entries) - } /* main while loop */ - - /* Invariants, after destroying all entries in the ring */ - for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) { - assert(cache_ptr->index_ring_len[i] == 0); - assert(cache_ptr->index_ring_size[i] == (size_t)0); - assert(cache_ptr->clean_index_ring_size[i] == (size_t)0); - assert(cache_ptr->dirty_index_ring_size[i] == (size_t)0); - - assert(cache_ptr->slist_ring_len[i] == 0); - assert(cache_ptr->slist_ring_size[i] == (size_t)0); - } /* end for */ - - assert(protected_entries <= cache_ptr->pl_len); - - if (protected_entries > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries") - else if (cur_ring_pel_len > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_invalidate_ring() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_ring - * - * Purpose: Flush the entries contained in the specified cache and - * ring. All entries in rings outside the specified ring - * must have been flushed on entry. - * - * If the cache contains protected entries in the specified - * ring, the function will fail, as protected entries cannot - * be flushed. However all unprotected entries in the target - * ring should be flushed before the function returns failure. - * - * If flush dependencies appear in the target ring, the - * function makes repeated passes through the slist flushing - * entries in flush dependency order. - * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. 
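The termination rule used above -- keep making passes while the pinned-entry count is still falling, and fail once it stalls above zero -- is a general pattern for fixpoint loops that may not converge. A tiny sketch, with make_pass() and count_pinned() as hypothetical stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pinned = 5;

    /* Hypothetical pass: unpins entries as their referents are destroyed */
    static void make_pass(void) { pinned = (pinned >= 2) ? pinned - 2 : 0; }

    static uint32_t count_pinned(void) { return pinned; }

    int main(void) {
        uint32_t old_len = count_pinned();
        uint32_t cur_len;

        for (;;) {
            make_pass();
            cur_len = count_pinned();
            if (cur_len == 0)
                break;              /* converged: nothing left pinned */
            if (cur_len >= old_len) {
                fprintf(stderr, "pinned count stopped declining -- give up\n");
                return 1;
            }
            old_len = cur_len;      /* progress was made; try another pass */
        }
        return 0;
    }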
- *
- * Programmer:  John Mainzer
- *              9/1/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
-{
-    H5C_t             *cache_ptr = f->shared->cache;
-    hbool_t            flushed_entries_last_pass;
-    hbool_t            flush_marked_entries;
-    hbool_t            ignore_protected;
-    hbool_t            tried_to_flush_protected_entry = FALSE;
-    hbool_t            restart_slist_scan;
-    uint32_t           protected_entries = 0;
-    H5SL_node_t       *node_ptr          = NULL;
-    H5C_cache_entry_t *entry_ptr         = NULL;
-    H5C_cache_entry_t *next_entry_ptr    = NULL;
-#ifdef H5C_DO_SANITY_CHECKS
-    uint32_t initial_slist_len  = 0;
-    size_t   initial_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
-    int    i;
-    herr_t ret_value = SUCCEED;
-
-    FUNC_ENTER_PACKAGE
-
-    assert(cache_ptr);
-    assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-    assert(cache_ptr->slist_enabled);
-    assert(cache_ptr->slist_ptr);
-    assert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
-    assert(ring > H5C_RING_UNDEFINED);
-    assert(ring < H5C_RING_NTYPES);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
-        H5C__validate_lru_list(cache_ptr) < 0)
-        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-    ignore_protected     = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
-    flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
-
-    if (!flush_marked_entries)
-        for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
-            assert(cache_ptr->slist_ring_len[i] == 0);
-
-    assert(cache_ptr->flush_in_progress);
-
-    /* When we are only flushing marked entries, the slist will usually
-     * still contain entries when we have flushed everything we should.
-     * Thus we track whether we have flushed any entries in the last
-     * pass, and terminate if we haven't.
-     */
-    flushed_entries_last_pass = TRUE;
-
-    /* Set cache_ptr->slist_changed to FALSE.
-     *
-     * This flag is set to TRUE by H5C__flush_single_entry if the
-     * slist is modified by a pre_serialize, serialize, or notify callback.
-     * H5C_flush_cache uses this flag to detect any modifications
-     * to the slist that might corrupt the scan of the slist -- and
-     * restart the scan in this event.
-     */
-    cache_ptr->slist_changed = FALSE;
-
-    while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {
-        flushed_entries_last_pass = FALSE;
-
-#ifdef H5C_DO_SANITY_CHECKS
-        /* For sanity checking, try to verify that the skip list has
-         * the expected size and number of entries at the end of each
-         * internal while loop (see below).
-         *
-         * Doing this gets a bit tricky, as depending on flags, we may
-         * or may not flush all the entries in the slist.
-         *
-         * To make things more entertaining, with the advent of the
-         * fractal heap, the entry serialize callback can cause entries
-         * to be dirtied, resized, and/or moved. Also, the
-         * pre_serialize callback can result in an entry being
-         * removed from the cache via the take ownership flag.
-         *
-         * To deal with this, we first make note of the initial
-         * skip list length and size:
-         */
-        initial_slist_len  = cache_ptr->slist_len;
-        initial_slist_size = cache_ptr->slist_size;
-
-        /* As mentioned above, there is the possibility that
-         * entries will be dirtied, resized, flushed, or removed
-         * from the cache via the take ownership flag during
-         * our pass through the skip list.
To capture the number - * of entries added, and the skip list size delta, - * zero the slist_len_increase and slist_size_increase of - * the cache's instance of H5C_t. These fields will be - * updated elsewhere to account for slist insertions and/or - * dirty entry size changes. - */ - cache_ptr->slist_len_increase = 0; - cache_ptr->slist_size_increase = 0; - - /* at the end of the loop, use these values to compute the - * expected slist length and size and compare this with the - * value recorded in the cache's instance of H5C_t. - */ -#endif /* H5C_DO_SANITY_CHECKS */ - - restart_slist_scan = TRUE; - while ((restart_slist_scan) || (node_ptr != NULL)) { - if (restart_slist_scan) { - restart_slist_scan = FALSE; - - /* Start at beginning of skip list */ - node_ptr = H5SL_first(cache_ptr->slist_ptr); - if (node_ptr == NULL) - /* the slist is empty -- break out of inner loop */ - break; - - /* Get cache entry for this node */ - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - assert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(next_entry_ptr->is_dirty); - assert(next_entry_ptr->in_slist); - } /* end if */ - - entry_ptr = next_entry_ptr; - - /* With the advent of the fractal heap, the free space - * manager, and the version 3 cache, it is possible - * that the pre-serialize or serialize callback will - * dirty, resize, or take ownership of other entries - * in the cache. - * - * To deal with this, there is code to detect any - * change in the skip list not directly under the control - * of this function. If such modifications are detected, - * we must re-start the scan of the skip list to avoid - * the possibility that the target of the next_entry_ptr - * may have been flushed or deleted from the cache. - * - * To verify that all such possibilities have been dealt - * with, we do a bit of extra sanity checking on - * entry_ptr. - */ - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->in_slist); - assert(entry_ptr->is_dirty); - - if (!flush_marked_entries || entry_ptr->flush_marker) - assert(entry_ptr->ring >= ring); - - /* Advance node pointer now, before we delete its target - * from the slist. - */ - node_ptr = H5SL_next(node_ptr); - if (node_ptr != NULL) { - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - assert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(next_entry_ptr->is_dirty); - assert(next_entry_ptr->in_slist); - - if (!flush_marked_entries || next_entry_ptr->flush_marker) - assert(next_entry_ptr->ring >= ring); - - assert(entry_ptr != next_entry_ptr); - } /* end if */ - else - next_entry_ptr = NULL; - - if ((!flush_marked_entries || entry_ptr->flush_marker) && - ((!entry_ptr->flush_me_last) || - ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) || - (flush_marked_entries && entry_ptr->flush_marker)))) && - ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) && - (entry_ptr->ring == ring)) { - - assert(entry_ptr->flush_dep_nunser_children == 0); - - if (entry_ptr->is_protected) { - /* we probably have major problems -- but lets - * flush everything we can before we decide - * whether to flag an error. 
- */
-                    tried_to_flush_protected_entry = TRUE;
-                    protected_entries++;
-                } /* end if */
-                else {
-                    if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
-                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
-
-                    if (cache_ptr->slist_changed) {
-                        /* The slist has been modified by something
-                         * other than the simple removal of
-                         * the flushed entry after the flush.
-                         *
-                         * This has the potential to corrupt the
-                         * scan through the slist, so restart it.
-                         */
-                        restart_slist_scan       = TRUE;
-                        cache_ptr->slist_changed = FALSE;
-                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
-                    } /* end if */
-
-                    flushed_entries_last_pass = TRUE;
-                } /* end else */
-            } /* end if */
-        } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
-#ifdef H5C_DO_SANITY_CHECKS
-        /* Verify that the slist size and length are as expected. */
-        assert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
-               cache_ptr->slist_len);
-        assert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
-               cache_ptr->slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-    } /* while */
-
-    assert(protected_entries <= cache_ptr->pl_len);
-
-    if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry)
-        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
-
-#ifdef H5C_DO_SANITY_CHECKS
-    if (!flush_marked_entries) {
-        assert(cache_ptr->slist_ring_len[ring] == 0);
-        assert(cache_ptr->slist_ring_size[ring] == 0);
-    } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
-
-done:
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_ring() */
-
-/*-------------------------------------------------------------------------
- *
- * Function:    H5C__flush_single_entry
- *
- * Purpose:     Flush or clear (and evict if requested) the cache entry
- *              with the specified address and type. If the type is NULL,
- *              any unprotected entry at the specified address will be
- *              flushed (and possibly evicted).
- *
- *              Attempts to flush a protected entry will result in an
- *              error.
- *
- *              If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- *              be cleared and not flushed, and the call can't be part of a
- *              sequence of flushes.
- *
- *              The function silently does nothing if there is no entry
- *              at the supplied address, or if the entry found has the
- *              wrong type.
- *
- * Return:      Non-negative on success/Negative on failure or if there was
- *              an attempt to flush a protected item.
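The sanity-check bookkeeping asserted above (and again in H5C__flush_invalidate_ring) boils down to: snapshot the list's length, let callbacks record their net insertions as a signed delta, then assert that snapshot + delta equals the final length. A sketch with plain variables standing in for the H5C_t fields:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins for cache_ptr->slist_len / slist_len_increase */
    static uint32_t slist_len;
    static int32_t  slist_len_increase; /* signed: callbacks may add or remove */

    static void insert_entry(void) { slist_len++; slist_len_increase++; }
    static void remove_entry(void) { slist_len--; slist_len_increase--; }

    int main(void) {
        slist_len = 10;

        uint32_t initial_len = slist_len; /* snapshot before the pass */
        slist_len_increase   = 0;         /* zero the delta counter */

        /* ... a flush pass runs; callbacks mutate the list ... */
        insert_entry();
        insert_entry();
        remove_entry();

        /* after the pass, the books must balance */
        assert(slist_len == (uint32_t)((int32_t)initial_len + slist_len_increase));
        return 0;
    }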
- * - * Programmer: John Mainzer, 5/5/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) -{ - H5C_t *cache_ptr; /* Cache for file */ - hbool_t destroy; /* external flag */ - hbool_t clear_only; /* external flag */ - hbool_t free_file_space; /* external flag */ - hbool_t take_ownership; /* external flag */ - hbool_t del_from_slist_on_destroy; /* external flag */ - hbool_t during_flush; /* external flag */ - hbool_t write_entry; /* internal flag */ - hbool_t destroy_entry; /* internal flag */ - hbool_t generate_image; /* internal flag */ - hbool_t update_page_buffer; /* internal flag */ - hbool_t was_dirty; - hbool_t suppress_image_entry_writes = FALSE; - hbool_t suppress_image_entry_frees = FALSE; - haddr_t entry_addr = HADDR_UNDEF; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(f); - cache_ptr = f->shared->cache; - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(entry_ptr); - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->ring != H5C_RING_UNDEFINED); - assert(entry_ptr->type); - - /* setup external flags from the flags parameter */ - destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); - clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); - free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); - take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); - del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0); - during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0); - generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0); - update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0); - - /* Set the flag for destroying the entry, based on the 'take ownership' - * and 'destroy' flags - */ - if (take_ownership) - destroy_entry = FALSE; - else - destroy_entry = destroy; - - /* we will write the entry to disk if it exists, is dirty, and if the - * clear only flag is not set. - */ - if (entry_ptr->is_dirty && !clear_only) - write_entry = TRUE; - else - write_entry = FALSE; - - /* if we have received close warning, and we have been instructed to - * generate a metadata cache image, and we have actually constructed - * the entry images, set suppress_image_entry_frees to TRUE. - * - * Set suppress_image_entry_writes to TRUE if indicated by the - * image_ctl flags. 
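The block above decodes the packed flags word into named booleans once, up front, and then derives destroy_entry from the interaction of two of them (taking ownership suppresses the actual free). A condensed sketch with hypothetical flag values standing in for the H5C__*_FLAG constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flag bits, analogous to the H5C__*_FLAG values */
    #define FLUSH_INVALIDATE_FLAG 0x0001u
    #define FLUSH_CLEAR_ONLY_FLAG 0x0002u
    #define TAKE_OWNERSHIP_FLAG   0x0004u

    static void flush_one(unsigned flags, bool is_dirty) {
        bool destroy    = (flags & FLUSH_INVALIDATE_FLAG) != 0;
        bool clear_only = (flags & FLUSH_CLEAR_ONLY_FLAG) != 0;
        bool take_own   = (flags & TAKE_OWNERSHIP_FLAG) != 0;

        /* ownership transfer means the client keeps the entry alive */
        bool destroy_entry = take_own ? false : destroy;

        /* write out only when dirty and not doing a clear-only pass */
        bool write_entry = is_dirty && !clear_only;

        printf("destroy_entry=%d write_entry=%d\n", destroy_entry, write_entry);
    }

    int main(void) {
        flush_one(FLUSH_INVALIDATE_FLAG | TAKE_OWNERSHIP_FLAG, true);
        return 0;
    }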
- */
-    if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image &&
-        cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) {
-
-        /* Sanity checks */
-        assert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
-        assert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
-        assert((!clear_only) || !(entry_ptr->include_in_image));
-        assert((!take_ownership) || !(entry_ptr->include_in_image));
-        assert((!free_file_space) || !(entry_ptr->include_in_image));
-
-        suppress_image_entry_frees = TRUE;
-
-        if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
-            suppress_image_entry_writes = TRUE;
-    } /* end if */
-
-    /* run initial sanity checks */
-#ifdef H5C_DO_SANITY_CHECKS
-    if (cache_ptr->slist_enabled) {
-        if (entry_ptr->in_slist) {
-            assert(entry_ptr->is_dirty);
-            if (entry_ptr->flush_marker && !entry_ptr->is_dirty)
-                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
-        } /* end if */
-        else {
-            assert(!entry_ptr->is_dirty);
-            assert(!entry_ptr->flush_marker);
-            if (entry_ptr->is_dirty || entry_ptr->flush_marker)
-                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
-        } /* end else */
-    }
-    else { /* slist is disabled */
-        assert(!entry_ptr->in_slist);
-        if (!entry_ptr->is_dirty)
-            if (entry_ptr->flush_marker)
-                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
-    }
-#endif /* H5C_DO_SANITY_CHECKS */
-
-    if (entry_ptr->is_protected)
-        /* Attempt to flush a protected entry -- scream and die. */
-        HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
-
-    /* Set entry_ptr->flush_in_progress = TRUE and set
-     * entry_ptr->flush_marker = FALSE
-     *
-     * We will set flush_in_progress back to FALSE at the end if the
-     * entry still exists at that point.
-     */
-    entry_ptr->flush_in_progress = TRUE;
-    entry_ptr->flush_marker      = FALSE;
-
-    /* Preserve current dirty state for later */
-    was_dirty = entry_ptr->is_dirty;
-
-    /* The entry is dirty, and we are doing a flush, a flush destroy, or have
-     * been requested to generate an image. In those cases, serialize the
-     * entry.
-     */
-    if (write_entry || generate_image) {
-        assert(entry_ptr->is_dirty);
-        if (NULL == entry_ptr->image_ptr) {
-            if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
-                            "memory allocation failed for on disk image buffer")
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
-            H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
-                        H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
-        } /* end if */
-
-        if (!entry_ptr->image_up_to_date) {
-            /* Sanity check */
-            assert(!entry_ptr->prefetched);
-
-            /* Generate the entry's image */
-            if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
-        } /* end if ( ! (entry_ptr->image_up_to_date) ) */
-    } /* end if */
-
-    /* Finally, write the image to disk.
-     *
-     * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in
-     * the entry's type, we silently skip the write. This
-     * flag should only be used in test code.
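When memory sanity checks are enabled, the image buffer above is over-allocated and a known byte pattern is stamped past the payload, so a later check can detect overruns by the serialize callback. A freestanding sketch of the guard-byte idea (pattern and sizes here are arbitrary):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define IMAGE_EXTRA_SPACE 8
    static const uint8_t image_sanity[IMAGE_EXTRA_SPACE] = {0xde, 0xad, 0xbe, 0xef,
                                                            0xde, 0xad, 0xbe, 0xef};

    int main(void) {
        size_t   size  = 64;
        uint8_t *image = malloc(size + IMAGE_EXTRA_SPACE);
        if (image == NULL)
            return 1;

        /* stamp the guard pattern just past the real payload */
        memcpy(image + size, image_sanity, IMAGE_EXTRA_SPACE);

        memset(image, 0, size); /* a well-behaved "serialize" stays in bounds */

        /* later: a corrupted guard means someone wrote past the image */
        assert(memcmp(image + size, image_sanity, IMAGE_EXTRA_SPACE) == 0);

        free(image);
        return 0;
    }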
- */ - if (write_entry) { - assert(entry_ptr->is_dirty); - -#ifdef H5C_DO_SANITY_CHECKS - if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!") -#endif /* H5C_DO_SANITY_CHECKS */ - - /* Write the image to disk unless the write is suppressed. - * - * This happens if both suppress_image_entry_writes and - * entry_ptr->include_in_image are TRUE, or if the - * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This - * flag should only be used in test code - */ - if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) && - ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) { - H5FD_mem_t mem_type = H5FD_MEM_DEFAULT; - -#ifdef H5_HAVE_PARALLEL - if (cache_ptr->coll_write_list) { - if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item") - } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ - if (entry_ptr->prefetched) { - assert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type; - } /* end if */ - else - mem_type = entry_ptr->type->mem_type; - - if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") -#ifdef H5_HAVE_PARALLEL - } -#endif /* H5_HAVE_PARALLEL */ - } /* end if */ - - /* if the entry has a notify callback, notify it that we have - * just flushed the entry. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush") - } /* if ( write_entry ) */ - - /* At this point, all pre-serialize and serialize calls have been - * made if it was appropriate to make them. Similarly, the entry - * has been written to disk if desired. - * - * Thus it is now safe to update the cache data structures for the - * flush. - */ - - /* start by updating the statistics */ - if (clear_only) { - /* only log a clear if the entry was dirty */ - if (was_dirty) - H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) - } - else if (write_entry) { - assert(was_dirty); - - /* only log a flush if we actually wrote to disk */ - H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) - } /* end else if */ - - /* Note that the algorithm below is (very) similar to the set of operations - * in H5C_remove_entry() and should be kept in sync with changes - * to that code. - QAK, 2016/11/30 - */ - - /* Update the cache internal data structures. */ - if (destroy) { - /* Sanity checks */ - if (take_ownership) - assert(!destroy_entry); - else - assert(destroy_entry); - - assert(!entry_ptr->is_pinned); - - /* Update stats, while entry is still in the cache */ - H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) - - /* If the entry's type has a 'notify' callback and the entry is about - * to be removed from the cache, send a 'before eviction' notice while - * the entry is still fully integrated in the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") - - /* Update the cache internal data structures as appropriate - * for a destroy. 
Specifically: - * - * 1) Delete it from the index - * - * 2) Delete it from the skip list if requested. - * - * 3) Delete it from the collective read access list. - * - * 4) Update the replacement policy for eviction - * - * 5) Remove it from the tag list for this object - * - * Finally, if the destroy_entry flag is set, discard the - * entry. - */ - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->in_slist && del_from_slist_on_destroy) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) - -#ifdef H5_HAVE_PARALLEL - /* Check for collective read access flag */ - if (entry_ptr->coll_access) { - entry_ptr->coll_access = FALSE; - H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL) - - /* Remove entry from tag list */ - if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") - - /* verify that the entry is no longer part of any flush dependencies */ - assert(entry_ptr->flush_dep_nparents == 0); - assert(entry_ptr->flush_dep_nchildren == 0); - } /* end if */ - else { - assert(clear_only || write_entry); - assert(entry_ptr->is_dirty); - assert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); - - /* We are either doing a flush or a clear. - * - * A clear and a flush are the same from the point of - * view of the replacement policy and the slist. - * Hence no differentiation between them. - */ - H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) - - /* mark the entry as clean and update the index for - * entry clean. Also, call the clear callback - * if defined. - */ - entry_ptr->is_dirty = FALSE; - - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); - - /* Check for entry changing status and do notifications, etc. */ - if (was_dirty) { - /* If the entry's type has a 'notify' callback send a - * 'entry cleaned' notice now that the entry is fully - * integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the clean flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_ndirty_children != 0) - assert(entry_ptr->flush_dep_ndirty_children == 0); - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag") - } /* end if */ - } /* end else */ - - /* reset the flush_in progress flag */ - entry_ptr->flush_in_progress = FALSE; - - /* capture the cache entry address for the log_flush call at the - * end before the entry_ptr gets freed - */ - entry_addr = entry_ptr->addr; - - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. - * - * Now discard the entry if appropriate. - */ - if (destroy) { - /* Sanity check */ - assert(0 == entry_ptr->flush_dep_nparents); - - /* if both suppress_image_entry_frees and entry_ptr->include_in_image - * are true, simply set entry_ptr->image_ptr to NULL, as we have - * another pointer to the buffer in an instance of H5C_image_entry_t - * in cache_ptr->image_entries. - * - * Otherwise, free the buffer if it exists. 
- */
-        if (suppress_image_entry_frees && entry_ptr->include_in_image)
-            entry_ptr->image_ptr = NULL;
-        else if (entry_ptr->image_ptr != NULL)
-            entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
-
-        /* If the entry is not a prefetched entry, verify that the flush
-         * dependency parents addresses array has been transferred.
-         *
-         * If the entry is prefetched, the free_icr routine will dispose of
-         * the flush dependency parents addresses array if necessary.
-         */
-        if (!entry_ptr->prefetched) {
-            assert(0 == entry_ptr->fd_parent_count);
-            assert(NULL == entry_ptr->fd_parent_addrs);
-        } /* end if */
-
-        /* Check whether we should free the space in the file that
-         * the entry occupies
-         */
-        if (free_file_space) {
-            hsize_t fsf_size;
-
-            /* Sanity checks */
-            assert(H5_addr_defined(entry_ptr->addr));
-            assert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
-#ifndef NDEBUG
-            {
-                size_t curr_len;
-
-                /* Get the actual image size for the thing again */
-                entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
-                assert(curr_len == entry_ptr->size);
-            }
-#endif /* NDEBUG */
-
-            /* If the file space free size callback is defined, use
-             * it to get the size of the block of file space to free.
-             * Otherwise use entry_ptr->size.
-             */
-            if (entry_ptr->type->fsf_size) {
-                if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
-                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
-            } /* end if */
-            else /* no file space free size callback -- use entry size */
-                fsf_size = entry_ptr->size;
-
-            /* Release the space on disk */
-            if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
-        } /* end if ( free_file_space ) */
-
-        /* Reset the pointer to the cache the entry is within. -QAK */
-        entry_ptr->cache_ptr = NULL;
-
-        /* increment entries_removed_counter and set
-         * last_entry_removed_ptr. As we are likely about to
-         * free the entry, recall that last_entry_removed_ptr
-         * must NEVER be dereferenced.
-         *
-         * Recall that these fields are maintained to allow functions
-         * that perform scans of lists of entries to detect the
-         * unexpected removal of entries (via expunge, eviction,
-         * or take ownership at present), so that they can re-start
-         * their scans if necessary.
-         *
-         * Also check if the entry we are watching for removal is being
-         * removed (usually the 'next' entry for an iteration) and reset
-         * it to indicate that it was removed.
-         */
-        cache_ptr->entries_removed_counter++;
-        cache_ptr->last_entry_removed_ptr = entry_ptr;
-
-        if (entry_ptr == cache_ptr->entry_watched_for_removal)
-            cache_ptr->entry_watched_for_removal = NULL;
-
-        /* Check for actually destroying the entry in memory */
-        /* (As opposed to taking ownership of it) */
-        if (destroy_entry) {
-            if (entry_ptr->is_dirty) {
-                /* Reset dirty flag */
-                entry_ptr->is_dirty = FALSE;
-
-                /* If the entry's type has a 'notify' callback send an
-                 * 'entry cleaned' notice now that the entry is fully
-                 * integrated into the cache.
-                 */
-                if (entry_ptr->type->notify &&
-                    (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
-                    HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
-                                "can't notify client about entry dirty flag cleared")
-            } /* end if */
-
-            /* we are about to discard the in core representation --
-             * set the magic field to bad magic so we can detect a
-             * freed entry if we see one.
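The fsf_size logic above is a simple callback-with-default: ask the client how much file space the entry really owns, and fall back to the in-cache size when no callback is registered. A sketch with illustrative types and names (not the real H5C_class_t layout):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct entry entry_t;

    typedef struct {
        /* optional: reports the on-disk footprint to free; NULL = use size */
        int (*fsf_size)(const entry_t *thing, uint64_t *out);
    } class_t;

    struct entry {
        const class_t *type;
        uint64_t       size; /* in-cache entry size */
    };

    /* Ask the client how much file space to release, defaulting to size */
    static int get_free_size(const entry_t *e, uint64_t *fsf_size) {
        if (e->type->fsf_size != NULL)
            return e->type->fsf_size(e, fsf_size); /* client knows best */
        *fsf_size = e->size;                       /* default: entry size */
        return 0;
    }

    int main(void) {
        static const class_t plain = {NULL};
        entry_t  e = {&plain, 512};
        uint64_t n = 0;
        if (get_free_size(&e, &n) == 0)
            printf("free %llu bytes of file space\n", (unsigned long long)n);
        return 0;
    }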
- */
-            entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-
-            /* verify that the image has been freed */
-            assert(entry_ptr->image_ptr == NULL);
-
-            if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
-        } /* end if */
-        else {
-            assert(take_ownership);
-
-            /* Client is taking ownership of the entry. Set bad magic here too
-             * so the cache will choke unless the entry is re-inserted properly
-             */
-            entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-        } /* end else */
-    } /* if (destroy) */
-
-    /* Check if we have to update the page buffer with cleared entries
-     * so it doesn't go out of date
-     */
-    if (update_page_buffer) {
-        /* Sanity check */
-        assert(!destroy);
-        assert(entry_ptr->image_ptr);
-
-        if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size))
-            if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
-                                  entry_ptr->image_ptr) > 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
-    } /* end if */
-
-    if (cache_ptr->log_flush)
-        if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
-            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
-
-done:
-    assert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
-    assert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
-
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_single_entry() */
-
-/*-------------------------------------------------------------------------
- *
- * Function:    H5C__verify_len_eoa
- *
- * Purpose:     Verify that 'len' does not exceed eoa when 'actual' is
- *              false, i.e. 'len' is the initial speculative length from
- *              the get_load_size callback with a null image pointer.
- *              If it does, adjust 'len' accordingly.
- *
- *              Verify that 'len' does not exceed eoa when 'actual' is
- *              true, i.e. 'len' is the actual length from the get_load_size
- *              callback with a non-null image pointer.
- *              If it does, return an error.
- *
- * Return:      FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer:  Vailin Choi
- *              9/6/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
-{
-    H5FD_mem_t cooked_type;         /* Modified type, accounting for switching global heaps */
-    haddr_t    eoa;                 /* End-of-allocation in the file */
-    herr_t     ret_value = SUCCEED; /* Return value */
-
-    FUNC_ENTER_PACKAGE
-
-    /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
-     * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
-     * Thus we do the same for purposes of computing the EOA
-     * for sanity checks.
-     */
-    cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ?
H5FD_MEM_DRAW : type->mem_type;
-
-    /* Get the file's end-of-allocation value */
-    eoa = H5F_get_eoa(f, cooked_type);
-    if (!H5_addr_defined(eoa))
-        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
-
-    /* Check for bad address in general */
-    if (H5_addr_gt(addr, eoa))
-        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
-
-    /* Check if the amount of data to read will be past the EOA */
-    if (H5_addr_gt((addr + *len), eoa)) {
-        if (actual)
-            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
-        else
-            /* Trim down the length of the metadata */
-            *len = (size_t)(eoa - addr);
-    } /* end if */
-
-    if (*len <= 0)
-        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
-
-done:
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__verify_len_eoa() */
-
-/*-------------------------------------------------------------------------
- *
- * Function:    H5C__load_entry
- *
- * Purpose:     Attempt to load the entry at the specified disk address
- *              and with the specified type into memory. If successful,
- *              return the in-memory address of the entry. Return NULL
- *              on failure.
- *
- *              Note that this function simply loads the entry into
- *              core. It does not insert it into the cache.
- *
- * Return:      Non-NULL on success / NULL on failure.
- *
- * Programmer:  John Mainzer, 5/18/04
- *
- *-------------------------------------------------------------------------
- */
-static void *
-H5C__load_entry(H5F_t *f,
-#ifdef H5_HAVE_PARALLEL
-                hbool_t coll_access,
-#endif /* H5_HAVE_PARALLEL */
-                const H5C_class_t *type, haddr_t addr, void *udata)
-{
-    hbool_t            dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
-    uint8_t           *image = NULL;  /* Buffer for disk image */
-    void              *thing = NULL;  /* Pointer to thing loaded */
-    H5C_cache_entry_t *entry = NULL;  /* Alias for thing loaded, as cache entry */
-    size_t             len;           /* Size of image in file */
-#ifdef H5_HAVE_PARALLEL
-    int      mpi_rank = 0;             /* MPI process rank */
-    MPI_Comm comm     = MPI_COMM_NULL; /* File MPI Communicator */
-    int      mpi_code;                 /* MPI error code */
-#endif /* H5_HAVE_PARALLEL */
-    void *ret_value = NULL; /* Return value */
-
-    FUNC_ENTER_PACKAGE
-
-    /* Sanity checks */
-    assert(f);
-    assert(f->shared);
-    assert(f->shared->cache);
-    assert(type);
-    assert(H5_addr_defined(addr));
-    assert(type->get_initial_load_size);
-    if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
-        assert(type->get_final_load_size);
-    else
-        assert(NULL == type->get_final_load_size);
-    assert(type->deserialize);
-
-    /* Can't see how skip reads could be usefully combined with
-     * the speculative read flag. Hence disallow.
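Condensing the EOA check defined just above: a speculative length that crosses the end of allocation gets trimmed, an actual (final) length that crosses it is an error, and a zero length after trimming is also rejected. A sketch with plain integers standing in for haddr_t and the file handle:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 on error; may shrink *len when !actual */
    static int verify_len_eoa(uint64_t addr, uint64_t eoa, uint64_t *len, bool actual) {
        if (addr > eoa)
            return -1;              /* object starts past end of allocation */
        if (addr + *len > eoa) {
            if (actual)
                return -1;          /* a known-final length must fit */
            *len = eoa - addr;      /* trim the speculative read */
        }
        return (*len > 0) ? 0 : -1; /* nothing left to read is an error */
    }

    int main(void) {
        uint64_t len = 100;
        if (verify_len_eoa(950, 1000, &len, false) == 0)
            printf("trimmed len = %llu\n", (unsigned long long)len); /* 50 */
        return 0;
    }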
- */
-    assert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
-
-    /* Call the get_initial_load_size callback, to retrieve the initial size of image */
-    if (type->get_initial_load_size(udata, &len) < 0)
-        HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
-    assert(len > 0);
-
-    /* Check for possible speculative read off the end of the file */
-    if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
-        if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
-            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
-
-    /* Allocate the buffer for reading the on-disk entry image */
-    if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
-        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
-    H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
-#ifdef H5_HAVE_PARALLEL
-    if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
-        if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
-            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
-        if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
-            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
-    } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
-    /* Get the on-disk entry image */
-    if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
-        unsigned tries, max_tries;   /* The # of read attempts */
-        unsigned retries;            /* The # of retries */
-        htri_t   chk_ret;            /* return from verify_chksum callback */
-        size_t   actual_len = len;   /* The actual length, after speculative reads have been resolved */
-        uint64_t nanosec    = 1;     /* # of nanoseconds to sleep between retries */
-        void    *new_image;          /* Pointer to image */
-        hbool_t  len_changed = TRUE; /* Whether to re-check speculative entries */
-
-        /* Get the # of read attempts */
-        max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
-
-        /*
-         * This do/while loop performs the following until the metadata checksum
-         * is correct or the file's number of allowed read attempts is reached.
- * --read the metadata - * --determine the actual size of the metadata - * --perform checksum verification - */ - do { - if (actual_len != len) { - if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") - image = (uint8_t *)new_image; -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - } /* end if */ - -#ifdef H5_HAVE_PARALLEL - if (!coll_access || 0 == mpi_rank) { -#endif /* H5_HAVE_PARALLEL */ - if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) { -#ifdef H5_HAVE_PARALLEL - if (coll_access) { - /* Push an error, but still participate in following MPI_Bcast */ - memset(image, 0, len); - HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") - } - else -#endif - HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") - } - -#ifdef H5_HAVE_PARALLEL - } /* end if */ - /* if the collective metadata read optimization is turned on, - * bcast the metadata read from process 0 to all ranks in the file - * communicator - */ - if (coll_access) { - int buf_size; - - H5_CHECKED_ASSIGN(buf_size, int, len, size_t); - if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - /* If the entry could be read speculatively and the length is still - * changing, check for updating the actual size - */ - if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) { - /* Retrieve the actual length */ - actual_len = len; - if (type->get_final_load_size(image, len, udata, &actual_len) < 0) - continue; /* Transfer control to while() and count towards retries */ - - /* Check for the length changing */ - if (actual_len != len) { - /* Verify that the length isn't past the EOA for the file */ - if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA") - - /* Expand buffer to new size */ - if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") - image = (uint8_t *)new_image; -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - if (actual_len > len) { -#ifdef H5_HAVE_PARALLEL - if (!coll_access || 0 == mpi_rank) { -#endif /* H5_HAVE_PARALLEL */ - /* If the thing's image needs to be bigger for a speculatively - * loaded thing, go get the on-disk image again (the extra portion). 
- */ - if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < - 0) { -#ifdef H5_HAVE_PARALLEL - if (coll_access) { - /* Push an error, but still participate in following MPI_Bcast */ - memset(image + len, 0, actual_len - len); - HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") - } - else -#endif - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") - } - -#ifdef H5_HAVE_PARALLEL - } - /* If the collective metadata read optimization is turned on, - * Bcast the metadata read from process 0 to all ranks in the file - * communicator */ - if (coll_access) { - int buf_size; - - H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t); - if (MPI_SUCCESS != - (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - } /* end if */ - } /* end if (actual_len != len) */ - else { - /* The length has stabilized */ - len_changed = FALSE; - - /* Set the final length */ - len = actual_len; - } /* else */ - } /* end if */ - - /* If there's no way to verify the checksum for a piece of metadata - * (usually because there's no checksum in the file), leave now - */ - if (type->verify_chksum == NULL) - break; - - /* Verify the checksum for the metadata image */ - if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback") - if (chk_ret == TRUE) - break; - - /* Sleep for some time */ - H5_nanosleep(nanosec); - nanosec *= 2; /* Double the sleep time next time */ - } while (--tries); - - /* Check for too many tries */ - if (tries == 0) - HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts") - - /* Calculate and track the # of retries */ - retries = max_tries - tries; - if (retries) /* Does not track 0 retry */ - if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries) - - /* Set the final length (in case it wasn't set earlier) */ - len = actual_len; - } /* end if !H5C__CLASS_SKIP_READS */ - - /* Deserialize the on-disk image into the native memory form */ - if (NULL == (thing = type->deserialize(image, len, udata, &dirty))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image") - - entry = (H5C_cache_entry_t *)thing; - - /* In general, an entry should be clean just after it is loaded. - * - * However, when this code is used in the metadata cache, it is - * possible that object headers will be dirty at this point, as - * the deserialize function will alter object headers if necessary to - * fix an old bug. - * - * In the following assert: - * - * assert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); - * - * note that type ids 5 & 6 are associated with object headers in the - * metadata cache. - * - * When we get to using H5C for other purposes, we may wish to - * tighten up the assert so that the loophole only applies to the - * metadata cache. 
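The read loop above retries a failed checksum with an exponentially growing sleep between attempts (the real code uses H5_nanosleep and a doubling interval). Stripped of the speculative-length and MPI machinery, the skeleton looks like this; read_block() and checksum_ok() are stubs, and the sleep uses POSIX nanosleep:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static bool read_block(void)  { return true; }                  /* stub I/O */
    static bool checksum_ok(void) { static int n; return ++n >= 3; } /* fails twice */

    static void sleep_ns(uint64_t ns) {
        struct timespec ts = {(time_t)(ns / 1000000000u), (long)(ns % 1000000000u)};
        nanosleep(&ts, NULL);
    }

    int main(void) {
        unsigned max_tries = 5, tries = max_tries;
        uint64_t ns = 1; /* first backoff interval */

        do {
            if (read_block() && checksum_ok())
                break;
            sleep_ns(ns);
            ns *= 2;     /* double the wait before each retry */
        } while (--tries);

        if (tries == 0) {
            fprintf(stderr, "checksum still bad after %u attempts\n", max_tries);
            return 1;
        }
        printf("succeeded after %u retries\n", max_tries - tries);
        return 0;
    }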
- */ - - assert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - - entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - entry->cache_ptr = f->shared->cache; - entry->addr = addr; - entry->size = len; - assert(entry->size < H5C_MAX_ENTRY_SIZE); - entry->image_ptr = image; - entry->image_up_to_date = !dirty; - entry->type = type; - entry->is_dirty = dirty; - entry->dirtied = FALSE; - entry->is_protected = FALSE; - entry->is_read_only = FALSE; - entry->ro_ref_count = 0; - entry->is_pinned = FALSE; - entry->in_slist = FALSE; - entry->flush_marker = FALSE; -#ifdef H5_HAVE_PARALLEL - entry->clear_on_unprotect = FALSE; - entry->flush_immediately = FALSE; - entry->coll_access = coll_access; -#endif /* H5_HAVE_PARALLEL */ - entry->flush_in_progress = FALSE; - entry->destroy_in_progress = FALSE; - - entry->ring = H5C_RING_UNDEFINED; - - /* Initialize flush dependency fields */ - entry->flush_dep_parent = NULL; - entry->flush_dep_nparents = 0; - entry->flush_dep_parent_nalloc = 0; - entry->flush_dep_nchildren = 0; - entry->flush_dep_ndirty_children = 0; - entry->flush_dep_nunser_children = 0; - entry->ht_next = NULL; - entry->ht_prev = NULL; - entry->il_next = NULL; - entry->il_prev = NULL; - - entry->next = NULL; - entry->prev = NULL; - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - entry->aux_next = NULL; - entry->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#ifdef H5_HAVE_PARALLEL - entry->coll_next = NULL; - entry->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* initialize cache image related fields */ - entry->include_in_image = FALSE; - entry->lru_rank = 0; - entry->image_dirty = FALSE; - entry->fd_parent_count = 0; - entry->fd_parent_addrs = NULL; - entry->fd_child_count = 0; - entry->fd_dirty_child_count = 0; - entry->image_fd_height = 0; - entry->prefetched = FALSE; - entry->prefetch_type_id = 0; - entry->age = 0; - entry->prefetched_dirty = FALSE; -#ifndef NDEBUG /* debugging field */ - entry->serialization_count = 0; -#endif /* NDEBUG */ - - /* initialize tag list fields */ - entry->tl_next = NULL; - entry->tl_prev = NULL; - entry->tag_info = NULL; - - H5C__RESET_CACHE_ENTRY_STATS(entry); - - ret_value = thing; - -done: - /* Cleanup on error */ - if (NULL == ret_value) { - /* Release resources */ - if (thing && type->free_icr(thing) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed") - if (image) - image = (uint8_t *)H5MM_xfree(image); - } /* end if */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__load_entry() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__make_space_in_cache - * - * Purpose: Attempt to evict cache entries until the index_size - * is at least needed_space below max_cache_size. - * - * In passing, also attempt to bring cLRU_list_size to a - * value greater than min_clean_size. - * - * Depending on circumstances, both of these goals may - * be impossible, as in parallel mode, we must avoid generating - * a write as part of a read (to avoid deadlock in collective - * I/O), and in all cases, it is possible (though hopefully - * highly unlikely) that the protected list may exceed the - * maximum size of the cache. - * - * Thus the function simply does its best, returning success - * unless an error is encountered. - * - * Observe that this function cannot occasion a read. - * - * Return: Non-negative on success/Negative on failure. 
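The eviction loop that follows is driven by two goals computed from the same quantities: fit space_needed under max_cache_size, and keep min_clean_size worth of clean entries, with empty space counting as clean. A sketch of just that condition bookkeeping (names mirror the locals below but the function is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool need_more_space(size_t index_size, size_t clean_index_size,
                                size_t max_cache_size, size_t min_clean_size,
                                size_t space_needed) {
        /* empty space counts toward the clean total */
        size_t empty = (index_size >= max_cache_size) ? 0 : max_cache_size - index_size;

        return (index_size + space_needed > max_cache_size) || /* goal 1: fit */
               (empty + clean_index_size < min_clean_size);    /* goal 2: clean floor */
    }

    int main(void) {
        /* 900 KiB used, 100 KiB clean, 1 MiB cap, 256 KiB clean floor, want 64 KiB */
        printf("%d\n", need_more_space(900 * 1024, 100 * 1024,
                                       1024 * 1024, 256 * 1024, 64 * 1024));
        return 0;
    }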
- *
- * Programmer:  John Mainzer, 5/14/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
-{
-    H5C_t *cache_ptr = f->shared->cache;
-#if H5C_COLLECT_CACHE_STATS
-    int32_t clean_entries_skipped    = 0;
-    int32_t dirty_pf_entries_skipped = 0;
-    int32_t total_entries_scanned    = 0;
-#endif /* H5C_COLLECT_CACHE_STATS */
-    uint32_t           entries_examined = 0;
-    uint32_t           initial_list_len;
-    size_t             empty_space;
-    hbool_t            reentrant_call    = FALSE;
-    hbool_t            prev_is_dirty     = FALSE;
-    hbool_t            didnt_flush_entry = FALSE;
-    hbool_t            restart_scan;
-    H5C_cache_entry_t *entry_ptr;
-    H5C_cache_entry_t *prev_ptr;
-    H5C_cache_entry_t *next_ptr;
-    uint32_t           num_corked_entries = 0;
-    herr_t             ret_value          = SUCCEED; /* Return value */
-
-    FUNC_ENTER_PACKAGE
-
-    /* Sanity checks */
-    assert(f);
-    assert(cache_ptr);
-    assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-    assert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
-
-    /* check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
-     * is a re-entrant call via a client callback called in the make
-     * space in cache process. To avoid an infinite recursion, set
-     * reentrant_call to TRUE, and goto done.
-     */
-    if (cache_ptr->msic_in_progress) {
-        reentrant_call = TRUE;
-        HGOTO_DONE(SUCCEED);
-    } /* end if */
-
-    cache_ptr->msic_in_progress = TRUE;
-
-    if (write_permitted) {
-        restart_scan     = FALSE;
-        initial_list_len = cache_ptr->LRU_list_len;
-        entry_ptr        = cache_ptr->LRU_tail_ptr;
-
-        if (cache_ptr->index_size >= cache_ptr->max_cache_size)
-            empty_space = 0;
-        else
-            empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
-        while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
-                ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
-               (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
-            assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-            assert(!(entry_ptr->is_protected));
-            assert(!(entry_ptr->is_read_only));
-            assert((entry_ptr->ro_ref_count) == 0);
-
-            next_ptr = entry_ptr->next;
-            prev_ptr = entry_ptr->prev;
-
-            if (prev_ptr != NULL)
-                prev_is_dirty = prev_ptr->is_dirty;
-
-            if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
-                /* Skip "dirty" corked entries. */
-                ++num_corked_entries;
-                didnt_flush_entry = TRUE;
-            }
-            else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress &&
-                     !entry_ptr->prefetched_dirty) {
-                didnt_flush_entry = FALSE;
-                if (entry_ptr->is_dirty) {
-#if H5C_COLLECT_CACHE_STATS
-                    if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size)
-                        cache_ptr->entries_scanned_to_make_space++;
-#endif /* H5C_COLLECT_CACHE_STATS */
-
-                    /* reset entries_removed_counter and
-                     * last_entry_removed_ptr prior to the call to
-                     * H5C__flush_single_entry() so that we can spot
-                     * unexpected removals of entries from the cache,
-                     * and set the restart_scan flag if proceeding
-                     * would be likely to cause us to scan an entry
-                     * that is no longer in the cache.
- */ - cache_ptr->entries_removed_counter = 0; - cache_ptr->last_entry_removed_ptr = NULL; - - if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - - if ((cache_ptr->entries_removed_counter > 1) || - (cache_ptr->last_entry_removed_ptr == prev_ptr)) - - restart_scan = TRUE; - } - else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size -#ifdef H5_HAVE_PARALLEL - && !(entry_ptr->coll_access) -#endif /* H5_HAVE_PARALLEL */ - ) { -#if H5C_COLLECT_CACHE_STATS - cache_ptr->entries_scanned_to_make_space++; -#endif /* H5C_COLLECT_CACHE_STATS */ - - if (H5C__flush_single_entry(f, entry_ptr, - H5C__FLUSH_INVALIDATE_FLAG | - H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - } - else { - /* We have enough space so don't flush clean entry. */ -#if H5C_COLLECT_CACHE_STATS - clean_entries_skipped++; -#endif /* H5C_COLLECT_CACHE_STATS */ - didnt_flush_entry = TRUE; - } - -#if H5C_COLLECT_CACHE_STATS - total_entries_scanned++; -#endif /* H5C_COLLECT_CACHE_STATS */ - } - else { - - /* Skip epoch markers, entries that are in the process - * of being flushed, and entries marked as prefetched_dirty - * (occurs in the R/O case only). - */ - didnt_flush_entry = TRUE; - -#if H5C_COLLECT_CACHE_STATS - if (entry_ptr->prefetched_dirty) - dirty_pf_entries_skipped++; -#endif /* H5C_COLLECT_CACHE_STATS */ - } - - if (prev_ptr != NULL) { - if (didnt_flush_entry) - /* epoch markers don't get flushed, and we don't touch - * entries that are in the process of being flushed. - * Hence no need for sanity checks, as we haven't - * flushed anything. Thus just set entry_ptr to prev_ptr - * and go on. - */ - entry_ptr = prev_ptr; - else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr || - prev_ptr->is_protected || prev_ptr->is_pinned) { - /* something has happened to the LRU -- start over - * from the tail. 
- */ - restart_scan = FALSE; - entry_ptr = cache_ptr->LRU_tail_ptr; - H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) - } - else - entry_ptr = prev_ptr; - } - else - entry_ptr = NULL; - - entries_examined++; - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - assert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); - } - -#if H5C_COLLECT_CACHE_STATS - cache_ptr->calls_to_msic++; - - cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped; - cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped; - cache_ptr->total_entries_scanned_in_msic += total_entries_scanned; - - if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) - cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped; - - if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic) - cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped; - - if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) - cache_ptr->max_entries_scanned_in_msic = total_entries_scanned; -#endif /* H5C_COLLECT_CACHE_STATS */ - - /* NEED: work on a better assert for corked entries */ - assert((entries_examined > (2 * initial_list_len)) || - ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) > - cache_ptr->max_cache_size) || - ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) || - ((num_corked_entries))); -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - - assert((entries_examined > (2 * initial_list_len)) || - (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size)); - assert((entries_examined > (2 * initial_list_len)) || - (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size)); - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - } - else { - assert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - initial_list_len = cache_ptr->cLRU_list_len; - entry_ptr = cache_ptr->cLRU_tail_ptr; - - while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && - (entries_examined <= initial_list_len) && (entry_ptr != NULL)) { - assert(!(entry_ptr->is_protected)); - assert(!(entry_ptr->is_read_only)); - assert((entry_ptr->ro_ref_count) == 0); - assert(!(entry_ptr->is_dirty)); - - prev_ptr = entry_ptr->aux_prev; - - if (!entry_ptr->prefetched_dirty -#ifdef H5_HAVE_PARALLEL - && !entry_ptr->coll_access -#endif /* H5_HAVE_PARALLEL */ - ) { - if (H5C__flush_single_entry( - f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - } /* end if */ - - /* we are scanning the clean LRU, so the serialize function - * will not be called on any entry -- thus there is no - * concern about the list being modified out from under - * this function. - */ - - entry_ptr = prev_ptr; - entries_examined++; - } -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - } - -done: - /* Sanity checks */ - assert(cache_ptr->msic_in_progress); - if (!reentrant_call) - cache_ptr->msic_in_progress = FALSE; - assert((!reentrant_call) || (cache_ptr->msic_in_progress)); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__make_space_in_cache() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_lru_list - * - * Purpose: Debugging function that scans the LRU list for errors. 
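The eviction scan in H5C__make_space_in_cache() above has to survive client callbacks that mutate the LRU mid-scan. Its defensive pattern, record the neighbour's state before flushing and restart from the tail on any mismatch, can be distilled as follows (hypothetical node type, not the real H5C structures):

    typedef struct node {
        struct node *prev, *next;
        int          is_dirty;
    } node_t;

    static void
    scan_with_restart(node_t **tail_ptr, int (*flush)(node_t *))
    {
        node_t *cur = *tail_ptr;

        while (cur != NULL) {
            node_t *prev       = cur->prev;
            node_t *next       = cur->next;
            int     prev_dirty = (prev != NULL) ? prev->is_dirty : 0;

            (void)flush(cur); /* may evict or move arbitrary nodes */

            /* If the recorded neighbour no longer matches, the list was
             * perturbed under us; resume safely from the tail.
             */
            if (prev != NULL && (prev->is_dirty != prev_dirty || prev->next != next))
                cur = *tail_ptr;
            else
                cur = prev;
        }
    }

Note that the real function also caps the number of entries examined, which keeps a repeatedly perturbed list from turning the restart into an unbounded loop.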
- * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. - * - * Programmer: John Mainzer, 7/14/05 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_EXTREME_SANITY_CHECKS -herr_t -H5C__validate_lru_list(H5C_t *cache_ptr) -{ - int32_t len = 0; - size_t size = 0; - H5C_cache_entry_t *entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) && - (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed") - - if ((cache_ptr->LRU_list_len == 1) && - ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) || - (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") - - if ((cache_ptr->LRU_list_len >= 1) && - ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) || - (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") - - entry_ptr = cache_ptr->LRU_head_ptr; - while (entry_ptr != NULL) { - if ((entry_ptr != cache_ptr->LRU_head_ptr) && - ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if ((entry_ptr != cache_ptr->LRU_tail_ptr) && - ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") - - len++; - size += entry_ptr->size; - entry_ptr = entry_ptr->next; - } - - if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed") - -done: - if (ret_value != SUCCEED) - assert(0); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__validate_lru_list() */ -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_pinned_entry_list - * - * Purpose: Debugging function that scans the pinned entry list for - * errors. - * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. 
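The same head/tail, link-consistency, and running-total checks recur in the pinned-list and protected-list validators that follow. Their common shape, for any doubly linked list with cached length and byte totals, is roughly this (hypothetical node type, sketch only):

    #include <assert.h>
    #include <stddef.h>

    typedef struct dnode {
        struct dnode *prev, *next;
        size_t        size;
    } dnode_t;

    static void
    validate_dlist(const dnode_t *head, const dnode_t *tail, size_t exp_len, size_t exp_size)
    {
        size_t len = 0, size = 0;

        /* head and tail must be both NULL or both non-NULL, with open ends */
        assert((head == NULL) == (tail == NULL));
        assert(head == NULL || (head->prev == NULL && tail->next == NULL));

        for (const dnode_t *n = head; n != NULL; n = n->next) {
            /* interior links must be mutually consistent */
            assert(n == head || (n->prev != NULL && n->prev->next == n));
            assert(n == tail || (n->next != NULL && n->next->prev == n));
            len++;
            size += n->size;
        }

        /* cached totals must agree with what the walk found */
        assert(len == exp_len && size == exp_size);
    }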
- * - * Programmer: John Mainzer, 4/25/14 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_EXTREME_SANITY_CHECKS -herr_t -H5C__validate_pinned_entry_list(H5C_t *cache_ptr) -{ - int32_t len = 0; - size_t size = 0; - H5C_cache_entry_t *entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) && - (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed") - - if ((cache_ptr->pel_len == 1) && - ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) || - (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") - - if ((cache_ptr->pel_len >= 1) && - ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) || - (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") - - entry_ptr = cache_ptr->pel_head_ptr; - while (entry_ptr != NULL) { - if ((entry_ptr != cache_ptr->pel_head_ptr) && - ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if ((entry_ptr != cache_ptr->pel_tail_ptr) && - ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if (!entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry") - - if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") - - len++; - size += entry_ptr->size; - entry_ptr = entry_ptr->next; - } - - if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed") - -done: - if (ret_value != SUCCEED) - assert(0); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__validate_pinned_entry_list() */ -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_protected_entry_list - * - * Purpose: Debugging function that scans the protected entry list for - * errors. - * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. 
- *
- * Programmer: John Mainzer, 4/25/14
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-herr_t
-H5C__validate_protected_entry_list(H5C_t *cache_ptr)
-{
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
- (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed")
-
- if ((cache_ptr->pl_len == 1) &&
- ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
- (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
-
- if ((cache_ptr->pl_len >= 1) &&
- ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
- (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
-
- entry_ptr = cache_ptr->pl_head_ptr;
- while (entry_ptr != NULL) {
- if ((entry_ptr != cache_ptr->pl_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if (!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry")
-
- if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count")
-
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
-
- if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed")
-
-done:
- if (ret_value != SUCCEED)
- assert(0);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_protected_entry_list() */
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__entry_in_skip_list
- *
- * Purpose: Debugging function that scans the skip list to see if the
- * target entry is present. We need this, as it is possible for
- * an entry to be in the skip list twice.
- *
- * Return: FALSE if the entry is not in the skip list, and TRUE
- * if it is.
- * - * Programmer: John Mainzer, 11/1/14 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_SLIST_SANITY_CHECKS -hbool_t -H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) -{ - H5SL_node_t *node_ptr; - hbool_t in_slist; - hbool_t ret_value; - - FUNC_ENTER_PACKAGE - - /* Assertions */ - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(cache_ptr->slist_ptr); - - node_ptr = H5SL_first(cache_ptr->slist_ptr); - in_slist = FALSE; - while ((node_ptr != NULL) && (!in_slist)) { - H5C_cache_entry_t *entry_ptr; - - entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - - assert(entry_ptr); - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->is_dirty); - assert(entry_ptr->in_slist); - - if (entry_ptr == target_ptr) - in_slist = TRUE; - else - node_ptr = H5SL_next(node_ptr); - } - - /* Set return value */ - ret_value = in_slist; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__entry_in_skip_list() */ -#endif /* H5C_DO_SLIST_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_marked_entries - * - * Purpose: Flushes all marked entries in the cache. - * - * Return: FAIL if error is detected, SUCCEED otherwise. - * - * Programmer: Mike McGreevy - * November 3, 2010 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__flush_marked_entries(H5F_t *f) -{ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - /* Assertions */ - assert(f != NULL); - - /* Enable the slist, as it is needed in the flush */ - if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") - - /* Flush all marked entries */ - if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache") - - /* Disable the slist. Set the clear_slist parameter to TRUE - * since we called H5C_flush_cache() with the - * H5C__FLUSH_MARKED_ENTRIES_FLAG. 
- */ - if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_marked_entries */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_cork - * - * Purpose: To cork/uncork/get cork status of an object depending on "action": - * H5C__SET_CORK: - * To cork the object - * Return error if the object is already corked - * H5C__UNCORK: - * To uncork the object - * Return error if the object is not corked - * H5C__GET_CORKED: - * To retrieve the cork status of an object in - * the parameter "corked" - * - * Return: Success: Non-negative - * Failure: Negative - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) -{ - H5C_tag_info_t *tag_info = NULL; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_NOAPI_NOINIT - - /* Assertions */ - assert(cache_ptr != NULL); - assert(H5_addr_defined(obj_addr)); - assert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED); - - /* Search the list of corked object addresses in the cache */ - HASH_FIND(hh, cache_ptr->tag_list, &obj_addr, sizeof(haddr_t), tag_info); - - if (H5C__GET_CORKED == action) { - assert(corked); - if (tag_info != NULL && tag_info->corked) - *corked = TRUE; - else - *corked = FALSE; - } - else { - /* Sanity check */ - assert(H5C__SET_CORK == action || H5C__UNCORK == action); - - /* Perform appropriate action */ - if (H5C__SET_CORK == action) { - /* Check if this is the first entry for this tagged object */ - if (NULL == tag_info) { - /* Allocate new tag info struct */ - if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry") - - /* Set the tag for all entries */ - tag_info->tag = obj_addr; - - /* Insert tag info into hash table */ - HASH_ADD(hh, cache_ptr->tag_list, tag, sizeof(haddr_t), tag_info); - } - else { - /* Check for object already corked */ - if (tag_info->corked) - HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked") - assert(tag_info->entry_cnt > 0 && tag_info->head); - } - - /* Set the corked status for the entire object */ - tag_info->corked = TRUE; - cache_ptr->num_objs_corked++; - } - else { - /* Sanity check */ - if (NULL == tag_info) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "tag info pointer is NULL") - - /* Check for already uncorked */ - if (!tag_info->corked) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked") - - /* Set the corked status for the entire object */ - tag_info->corked = FALSE; - cache_ptr->num_objs_corked--; - - /* Remove the tag info from the tag list, if there's no more entries with this tag */ - if (0 == tag_info->entry_cnt) { - /* Sanity check */ - assert(NULL == tag_info->head); - - HASH_DELETE(hh, cache_ptr->tag_list, tag_info); - - /* Release the tag info */ - tag_info = H5FL_FREE(H5C_tag_info_t, tag_info); - } - else - assert(NULL != tag_info->head); - } - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_cork() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_dirty() - * - * Purpose: Recursively propagate the flush_dep_ndirty_children flag - * up the dependency chain in response to entry either - * becoming dirty or having its flush_dep_ndirty_children - * increased from 0. 
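For orientation, a hypothetical caller of the H5C_cork() machinery above would follow this protocol. H5C_cork() is internal (real callers typically go through the H5AC layer), so this is a sketch of intent rather than public API usage; cache_ptr and obj_addr are assumed to be in scope:

    hbool_t corked = FALSE;

    /* keep the object's tagged metadata entries from being evicted
     * while a multi-step update is in flight
     */
    if (H5C_cork(cache_ptr, obj_addr, H5C__SET_CORK, NULL) < 0)
        /* error: the object may already be corked */;

    /* ... perform updates whose intermediate state must stay cached ... */

    if (H5C_cork(cache_ptr, obj_addr, H5C__GET_CORKED, &corked) >= 0 && corked)
        if (H5C_cork(cache_ptr, obj_addr, H5C__UNCORK, NULL) < 0)
            /* error: the object was not corked */;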
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Neil Fortner - * 11/13/12 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry) -{ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - assert(entry); - - /* Iterate over the parent entries, if any */ - for (u = 0; u < entry->flush_dep_nparents; u++) { - /* Sanity check */ - assert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < - entry->flush_dep_parent[u]->flush_dep_nchildren); - - /* Adjust the parent's number of dirty children */ - entry->flush_dep_parent[u]->flush_dep_ndirty_children++; - - /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */ - if (entry->flush_dep_parent[u]->type->notify && - (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, - entry->flush_dep_parent[u]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag set") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_dirty() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_clean() - * - * Purpose: Recursively propagate the flush_dep_ndirty_children flag - * up the dependency chain in response to entry either - * becoming clean or having its flush_dep_ndirty_children - * reduced to 0. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Neil Fortner - * 11/13/12 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry) -{ - int i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - assert(entry); - - /* Iterate over the parent entries, if any */ - /* Note reverse iteration order, in case the callback removes the flush - * dependency - QAK, 2017/08/12 - */ - for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) { - /* Sanity check */ - assert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0); - - /* Adjust the parent's number of dirty children */ - entry->flush_dep_parent[i]->flush_dep_ndirty_children--; - - /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ - if (entry->flush_dep_parent[i]->type->notify && - (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, - entry->flush_dep_parent[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag reset") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_clean() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_serialized() - * - * Purpose: Decrement the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to - * the target entry becoming serialized. 
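Taken together, the two walks above maintain, for every flush dependency parent P, the invariant 0 <= P->flush_dep_ndirty_children <= P->flush_dep_nchildren. A condensed sketch of the symmetric bookkeeping (hypothetical types; the real code also fires the 'notify' callbacks shown above):

    #include <assert.h>

    typedef struct fd_entry {
        struct fd_entry **parents;         /* flush dependency parents */
        unsigned          nparents;
        unsigned          nchildren;       /* children registered on this entry */
        unsigned          ndirty_children; /* how many of them are dirty */
    } fd_entry_t;

    static void
    child_dirtied(fd_entry_t *child)
    {
        for (unsigned u = 0; u < child->nparents; u++) {
            assert(child->parents[u]->ndirty_children < child->parents[u]->nchildren);
            child->parents[u]->ndirty_children++;
        }
    }

    static void
    child_cleaned(fd_entry_t *child)
    {
        /* reverse order, mirroring the note above about callbacks that
         * may remove a dependency mid-walk
         */
        for (int i = (int)child->nparents - 1; i >= 0; i--) {
            assert(child->parents[i]->ndirty_children > 0);
            child->parents[i]->ndirty_children--;
        }
    }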
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 8/30/16 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr) -{ - int i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - assert(entry_ptr); - - /* Iterate over the parent entries, if any */ - /* Note reverse iteration order, in case the callback removes the flush - * dependency - QAK, 2017/08/12 - */ - for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) { - /* Sanity checks */ - assert(entry_ptr->flush_dep_parent); - assert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0); - - /* decrement the parents number of unserialized children */ - entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--; - - /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ - if (entry_ptr->flush_dep_parent[i]->type->notify && - (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, - entry_ptr->flush_dep_parent[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry serialized flag set") - } /* end for */ + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } /* end switch */ + } /* end if */ done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_serialized() */ +} /* H5C_set_cache_auto_resize_config() */ /*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_unserialized() - * - * Purpose: Increment the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to - * the target entry becoming unserialized. + * Function: H5C_set_evictions_enabled() * - * Return: Non-negative on success/Negative on failure + * Purpose: Set cache_ptr->evictions_enabled to the value of the + * evictions enabled parameter. * - * Programmer: John Mainzer - * 8/30/16 + * Return: SUCCEED on success, and FAIL on failure. 
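As the guard in the body below spells out, evictions can only be switched off while automatic cache resizing is fully disabled. A caller-side sketch (illustrative; cache_ptr is assumed to be a valid H5C_t pointer):

    /* only attempt to disable evictions when both resize directions are off */
    if (cache_ptr->resize_ctl.incr_mode == H5C_incr__off &&
        cache_ptr->resize_ctl.decr_mode == H5C_decr__off) {
        if (H5C_set_evictions_enabled(cache_ptr, FALSE) < 0)
            /* handle error */;
    }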
 *
 *-------------------------------------------------------------------------
 */
-static herr_t
-H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
+herr_t
+H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
 {
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */

- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_NOAPI(FAIL)

- /* Sanity checks */
- assert(entry_ptr);
+ if (cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")

- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
- /* Sanity check */
- assert(entry_ptr->flush_dep_parent);
- assert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
- entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
-
- /* increment the parent's number of unserialized children */
- entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (entry_ptr->flush_dep_parent[u]->type->notify &&
- (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
- entry_ptr->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end for */
+ /* There is no fundamental reason why we should not permit
+ * evictions to be disabled while automatic resize is enabled.
+ * However, allowing it would greatly complicate testing
+ * the feature. Hence the following:
+ */
+ if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
+ (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled")
+
+ cache_ptr->evictions_enabled = evictions_enabled;

 done:
 FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_flush_dep_unserialized() */
+} /* H5C_set_evictions_enabled() */

-#ifndef NDEBUG
 /*-------------------------------------------------------------------------
- * Function: H5C__assert_flush_dep_nocycle()
+ * Function: H5C_set_slist_enabled()
 *
- * Purpose: Assert recursively that base_entry is not the same as
- * entry, and perform the same assertion on all of entry's
- * flush dependency parents. This is used to detect cycles
- * created by flush dependencies.
+ * Purpose: Enable or disable the slist as directed.
 *
- * Return: void
+ * The slist (skip list) is an address ordered list of
+ * dirty entries in the metadata cache. However, this
+ * list is only needed during flush and close, where we
+ * use it to write entries in more or less increasing
+ * address order.
 *
- * Programmer: Neil Fortner
- * 12/10/12
+ * This function sets up and enables further operations
+ * on the slist, or disables the slist. This in turn
+ * allows us to avoid the overhead of maintaining the
+ * slist when it is not needed.
 *
- *-------------------------------------------------------------------------
- */
-static void
-H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
-{
- unsigned u; /* Local index variable */
-
- FUNC_ENTER_PACKAGE_NOERR
-
- /* Sanity checks */
- assert(entry);
- assert(base_entry);
-
- /* Make sure the entries are not the same */
- assert(base_entry != entry);
-
- /* Iterate over entry's parents (if any) */
- for (u = 0; u < entry->flush_dep_nparents; u++)
- H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
-
- FUNC_LEAVE_NOAPI_VOID
-} /* H5C__assert_flush_dep_nocycle() */
-#endif /* NDEBUG */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
 *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
+ * If the slist_enabled parameter is TRUE, the function
 *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
+ * 1) Verifies that the slist is empty.
 *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
+ * 2) Scans the index list, and inserts all dirty entries
+ * into the slist.
 *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
+ * 3) Sets cache_ptr->slist_enabled = TRUE.
 *
- * The objective of this routine is to serialize all entries and
- * to force all entries into their actual locations on disk.
+ * Note that the clear_slist parameter is ignored if
+ * the slist_enabled parameter is TRUE.
 *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
 *
+ * If the slist_enabled parameter is FALSE, the function
+ * shuts down the slist.
 *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
+ * Normally the slist will be empty at this point, however
+ * that need not be the case if H5C_flush_cache() has been
+ * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
 *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__serialize_cache(H5F_t *f)
-{
-#ifdef H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t *cache_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- assert(f);
- assert(f->shared);
- cache_ptr = f->shared->cache;
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(cache_ptr->slist_ptr);
-
-#ifdef H5C_DO_SANITY_CHECKS
- assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- assert(cache_ptr->index_len == index_len);
- assert(cache_ptr->index_size == index_size);
- assert(cache_ptr->clean_index_size == clean_index_size);
- assert(cache_ptr->dirty_index_size == dirty_index_size);
- assert(cache_ptr->slist_len == slist_len);
- assert(cache_ptr->slist_size == slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issue), and more importantly, the
- * case in which any flush dependency parent is serialized more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- assert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- assert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
-
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */ - ring = H5C_RING_USER; - while (ring < H5C_RING_NTYPES) { - assert(cache_ptr->close_warning_received); - switch (ring) { - case H5C_RING_USER: - break; - - case H5C_RING_RDFSM: - /* Settle raw data FSM */ - if (!cache_ptr->rdfsm_settled) - if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed") - break; - - case H5C_RING_MDFSM: - /* Settle metadata FSM */ - if (!cache_ptr->mdfsm_settled) - if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed") - break; - - case H5C_RING_SBE: - case H5C_RING_SB: - break; - - default: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!") - break; - } /* end switch */ - - if (H5C__serialize_ring(f, ring) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed") - - ring++; - } /* end while */ - -#ifndef NDEBUG - /* Verify that no entry has been serialized more than once. - * FD parents with multiple serializations should have been caught - * elsewhere, so no specific check for them here. - */ - { - H5C_cache_entry_t *scan_ptr = NULL; - - scan_ptr = cache_ptr->il_head; - while (scan_ptr != NULL) { - assert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(scan_ptr->serialization_count <= 1); - - scan_ptr = scan_ptr->il_next; - } /* end while */ - } /* end block */ -#endif /* NDEBUG */ - -done: - cache_ptr->serialization_in_progress = FALSE; - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__serialize_cache() */ - -/*------------------------------------------------------------------------- - * Function: H5C__serialize_ring + * Thus shutdown proceeds as follows: * - * Purpose: Serialize the entries contained in the specified cache and - * ring. All entries in rings outside the specified ring - * must have been serialized on entry. + * 1) Test to see if the slist is empty. If it is, proceed + * to step 3. * - * If the cache contains protected entries in the specified - * ring, the function will fail, as protected entries cannot - * be serialized. However all unprotected entries in the - * target ring should be serialized before the function - * returns failure. + * 2) Test to see if the clear_slist parameter is TRUE. * - * If flush dependencies appear in the target ring, the - * function makes repeated passes through the index list - * serializing entries in flush dependency order. + * If it is, remove all entries from the slist. * - * All entries outside the H5C_RING_SBE are marked for - * inclusion in the cache image. Entries in H5C_RING_SBE - * and below are marked for exclusion from the image. + * If it isn't, throw an error. * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. + * 3) set cache_ptr->slist_enabled = FALSE. * - * Programmer: John Mainzer - * 9/11/15 + * Return: SUCCEED on success, and FAIL on failure. 
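The intended call sequence mirrors H5C__flush_marked_entries() shown earlier in this diff: enable the slist only for the duration of a flush, and clear it on shutdown when marked-entry flushing may have left dirty entries behind (sketch; f is an open H5F_t as in the callers above):

    /* build the slist from the index and enable its maintenance */
    if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
        /* handle error */;

    if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
        /* handle error */;

    /* clear_slist = TRUE: unflushed dirty entries may remain in the slist */
    if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
        /* handle error */;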
 *
 *-------------------------------------------------------------------------
 */
-static herr_t
-H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
+herr_t
+H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist)
 {
- hbool_t done = FALSE;
- H5C_t *cache_ptr;
 H5C_cache_entry_t *entry_ptr;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */

- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_NOAPI(FAIL)

- /* Sanity checks */
- assert(f);
- assert(f->shared);
- cache_ptr = f->shared->cache;
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(ring > H5C_RING_UNDEFINED);
- assert(ring < H5C_RING_NTYPES);
+ if (cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")

- assert(cache_ptr->serialization_in_progress);
+ if (slist_enabled) {
+ if (cache_ptr->slist_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?")
+ if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?")

- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their unserialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modifications to
- * the basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behavior. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list.
This shouldn't cause problems as we - * will scan the index list until we make a complete pass without - * finding anything to serialize -- making relocations of either - * the current or next entries irrelevant. - * - * Note that since a relocation may result in our skipping part of - * the index list, we must always do at least one more pass through - * the index list after an entry relocation. - * - * 4) Changes in the flush dependency structure are possible on - * entry insertion, load, expunge, evict, or remove. Destruction - * of a flush dependency has no effect, as it can only relax the - * flush dependencies. Creation of a flush dependency can create - * an unserialized child of a flush dependency parent where all - * flush dependency children were previously serialized. Should - * this child dirty the flush dependency parent when it is serialized, - * the parent will be re-serialized. - * - * Per the discussion of 2) above, this is a non issue for cache - * serialization, and a major problem for cache flush. Using the - * same detection mechanism, throw an assertion failure if this - * condition appears. - * - * Observe that either eviction or removal of entries as a result of - * a serialization is not a problem as long as the flush dependency - * tree does not change beyond the removal of a leaf. - */ - while (!done) { - /* Reset the counters so that we can detect insertions, loads, - * moves, and flush dependency height changes caused by the pre_serialize - * and serialize callbacks. + /* set cache_ptr->slist_enabled to TRUE so that the slist + * maintenance macros will be enabled. */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; + cache_ptr->slist_enabled = TRUE; - done = TRUE; /* set to FALSE if any activity in inner loop */ + /* scan the index list and insert all dirty entries in the slist */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - /* Verify that either the entry is already serialized, or - * that it is assigned to either the target or an inner - * ring. - */ - assert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); - - /* Skip flush me last entries or inner ring entries */ - if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) { - - /* if we encounter an unserialized entry in the current - * ring that is not marked flush me last, we are not done. - */ - if (!entry_ptr->image_up_to_date) - done = FALSE; - - /* Serialize the entry if its image is not up to date - * and it has no unserialized flush dependency children. 
- */ - if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) { - assert(entry_ptr->serialization_count == 0); - - /* Serialize the entry */ - if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") - - assert(entry_ptr->flush_dep_nunser_children == 0); - assert(entry_ptr->serialization_count == 0); - -#ifndef NDEBUG - /* Increment serialization counter (to detect multiple serializations) */ - entry_ptr->serialization_count++; -#endif /* NDEBUG */ - } /* end if */ - } /* end if */ - - /* Check for the cache being perturbed during the entry serialize */ - if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) || - (cache_ptr->entries_relocated_counter > 0)) { + if (entry_ptr->is_dirty) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); + entry_ptr = entry_ptr->il_next; + } -#if H5C_COLLECT_CACHE_STATS - H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr); -#endif /* H5C_COLLECT_CACHE_STATS */ + /* we don't maintain a dirty index len, so we can't do a cross + * check against it. Note that there is no point in cross checking + * against the dirty LRU size, as the dirty LRU may not be maintained, + * and in any case, there is no requirement that all dirty entries + * will reside on the dirty LRU. + */ + assert(cache_ptr->dirty_index_size == cache_ptr->slist_size); + } + else { /* take down the skip list */ + if (!cache_ptr->slist_enabled) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?") - /* Reset the counters */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; + if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) { + if (clear_slist) { + H5SL_node_t *node_ptr; - /* Restart scan */ - entry_ptr = cache_ptr->il_head; - } /* end if */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + while (node_ptr != NULL) { + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL); + node_ptr = H5SL_first(cache_ptr->slist_ptr); + } + } else - /* Advance to next entry */ - entry_ptr = entry_ptr->il_next; - } /* while ( entry_ptr != NULL ) */ - } /* while ( ! done ) */ - - /* Reset the counters so that we can detect insertions, loads, - * moves, and flush dependency height changes caused by the pre_serialize - * and serialize callbacks. - */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; - - /* At this point, all entries not marked "flush me last" and in - * the current ring or outside it should be serialized and have up - * to date images. Scan the index list again to serialize the - * "flush me last" entries (if they are in the current ring) and to - * verify that all other entries have up to date images. 
- */ - entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->ring > H5C_RING_UNDEFINED); - assert(entry_ptr->ring < H5C_RING_NTYPES); - assert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); - - if (entry_ptr->ring == ring) { - if (entry_ptr->flush_me_last) { - if (!entry_ptr->image_up_to_date) { - assert(entry_ptr->serialization_count == 0); - assert(entry_ptr->flush_dep_nunser_children == 0); - - /* Serialize the entry */ - if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") - - /* Check for the cache changing */ - if ((cache_ptr->entries_loaded_counter > 0) || - (cache_ptr->entries_inserted_counter > 0) || - (cache_ptr->entries_relocated_counter > 0)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flush_me_last entry serialization triggered restart") - - assert(entry_ptr->flush_dep_nunser_children == 0); - assert(entry_ptr->serialization_count == 0); -#ifndef NDEBUG - /* Increment serialization counter (to detect multiple serializations) */ - entry_ptr->serialization_count++; -#endif /* NDEBUG */ - } /* end if */ - } /* end if */ - else { - assert(entry_ptr->image_up_to_date); - assert(entry_ptr->serialization_count <= 1); - assert(entry_ptr->flush_dep_nunser_children == 0); - } /* end else */ - } /* if ( entry_ptr->ring == ring ) */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") + } + + cache_ptr->slist_enabled = FALSE; - entry_ptr = entry_ptr->il_next; - } /* while ( entry_ptr != NULL ) */ + assert(0 == cache_ptr->slist_len); + assert(0 == cache_ptr->slist_size); + } done: - assert(cache_ptr->serialization_in_progress); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__serialize_ring() */ +} /* H5C_set_slist_enabled() */ /*------------------------------------------------------------------------- - * Function: H5C__serialize_single_entry + * Function: H5C_unsettle_ring() + * + * Purpose: Advise the metadata cache that the specified free space + * manager ring is no longer settled (if it was on entry). + * + * If the target free space manager ring is already + * unsettled, do nothing, and return SUCCEED. + * + * If the target free space manager ring is settled, and + * we are not in the process of a file shutdown, mark + * the ring as unsettled, and return SUCCEED. * - * Purpose: Serialize the cache entry pointed to by the entry_ptr - * parameter. + * If the target free space manager is settled, and we + * are in the process of a file shutdown, post an error + * message, and return FAIL. 
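A caller-side sketch of the contract just described (illustrative; outside of file shutdown the call simply clears the settled flag for the named ring):

    /* the raw data free space manager changed; its ring is no longer settled */
    if (H5C_unsettle_ring(f, H5C_RING_RDFSM) < 0)
        /* only fails if the file is already in the process of closing */;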
 *
 * Return: Non-negative on success/Negative on failure
 *
- * Programmer: John Mainzer, 7/24/15
- *
 *-------------------------------------------------------------------------
 */
-static herr_t
-H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+herr_t
+H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring)
 {
+ H5C_t *cache_ptr;
 herr_t ret_value = SUCCEED; /* Return value */

- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_NOAPI(FAIL)

 /* Sanity checks */
 assert(f);
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(entry_ptr);
- assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(!entry_ptr->prefetched);
- assert(!entry_ptr->image_up_to_date);
- assert(entry_ptr->is_dirty);
- assert(!entry_ptr->is_protected);
- assert(!entry_ptr->flush_in_progress);
- assert(entry_ptr->type);
-
- /* Set entry_ptr->flush_in_progress to TRUE so the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
-
- /* Allocate buffer for the entry image if required. */
- if (NULL == entry_ptr->image_ptr) {
- assert(entry_ptr->size > 0);
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ assert(f->shared);
+ assert(f->shared->cache);
+ assert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
+ cache_ptr = f->shared->cache;
+
+ switch (ring) {
+ case H5C_RING_RDFSM:
+ if (cache_ptr->rdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache_ptr->rdfsm_settled = FALSE;
+ } /* end if */
+ break;

- /* Generate image for entry */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+ case H5C_RING_MDFSM:
+ if (cache_ptr->mdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache_ptr->mdfsm_settled = FALSE;
+ } /* end if */
+ break;

- /* Reset the flush_in_progress flag */
- entry_ptr->flush_in_progress = FALSE;
+ default:
+ assert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */

 done:
- assert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- assert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
 FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_single_entry() */
+} /* H5C_unsettle_ring() */

 /*-------------------------------------------------------------------------
- * Function: H5C__generate_image
- *
- * Purpose: Serialize an entry and generate its image.
+ * Function: H5C_validate_resize_config()
 *
- * Note: This may cause the entry to be re-sized and/or moved in
- * the cache.
+ * Purpose: Run a sanity check on the specified sections of the
+ * provided instance of struct H5C_auto_size_ctl_t.
 *
- * As we will not update the metadata cache's data structures
- * until we finish the write, we must touch up these
- * data structures for size and location changes even if we
- * are about to delete the entry from the cache (i.e. on a
- * flush destroy).
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
 *
 * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Mohamad Chaarawi
- * 2/10/16
- *
 *-------------------------------------------------------------------------
 */
-static herr_t
-H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+herr_t
+H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests)
 {
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
- unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */

- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_NOAPI(FAIL)

- /* Sanity check */
- assert(f);
- assert(cache_ptr);
- assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- assert(entry_ptr);
- assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(!entry_ptr->image_up_to_date);
- assert(entry_ptr->is_dirty);
- assert(!entry_ptr->is_protected);
- assert(entry_ptr->type);
-
- /* make note of the entry's current address */
- old_addr = entry_ptr->addr;
-
- /* Call client's pre-serialize callback, if there's one */
- if ((entry_ptr->type->pre_serialize) &&
- ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
- &new_len, &serialize_flags) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
-
- /* Check for any flags set in the pre-serialize callback */
- if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
- /* Check for unexpected flags from serialize callback */
- if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+ if (config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")

-#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, resizes and moves in
- * the serialize operation can cause problems.
- * If they occur, scream and die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entries that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- */
- if (cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
-#endif
+ if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")

- /* If required, resize the buffer and update the entry and the cache
- * data structures
- */
- if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- /* Sanity check */
- assert(new_len > 0);
-
- /* Allocate a new image buffer */
- if (NULL ==
- (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- /* Update statistics for resizing the entry */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
-
- /* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
- !entry_ptr->is_dirty, FAIL);
-
- /* The entry can't be protected since we are in the process of
- * flushing it. Thus we must update the replacement policy data
- * structures for the size change. The macro deals with the pinned
- * case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL);
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
+ if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
+ if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")
+ if (config_ptr->min_size > config_ptr->max_size)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
+ if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
+ (config_ptr->initial_size > config_ptr->max_size)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "initial_size must be in the interval [min_size, max_size]")
+ if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")
+ if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
+ if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
+ } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
+
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
+ if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
+
+ if (config_ptr->incr_mode == H5C_incr__threshold) {
+ if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "lower_hr_threshold must be in the range [0.0, 1.0]")
+ if (config_ptr->increment < 1.0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0")

- /* As we haven't updated the cache data structures
- * for the flush or flush destroy yet, the entry should
- * be in the slist if the slist is enabled. Since
- * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
- * slist is disabled, call it unconditionally.
+ /* no need to check max_increment, as it is a size_t, + * and thus must be non-negative. */ - assert(entry_ptr->is_dirty); - assert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled)); + } /* H5C_incr__threshold */ - H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len); + switch (config_ptr->flash_incr_mode) { + case H5C_flash_incr__off: + /* nothing to do here */ + break; - /* Finally, update the entry for its new size */ - entry_ptr->size = new_len; - } /* end if */ + case H5C_flash_incr__add_space: + if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "flash_multiple must be in the range [0.1, 10.0]") + if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "flash_threshold must be in the range [0.1, 1.0]") + break; - /* If required, udate the entry and the cache data structures - * for a move - */ - if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) { - /* Update stats and entries relocated counter */ - H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) + default: + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode") + break; + } /* end switch */ + } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ - /* We must update cache data structures for the change in address */ - if (entry_ptr->addr == old_addr) { - /* Delete the entry from the hash table and the slist */ - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE); + if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { + if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && + (config_ptr->decr_mode != H5C_decr__age_out) && + (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode") - /* Update the entry for its new address */ - entry_ptr->addr = new_addr; + if (config_ptr->decr_mode == H5C_decr__threshold) { + if (config_ptr->upper_hr_threshold > 1.0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0") + if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]") - /* And then reinsert in the index and slist */ - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL); - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); - } /* end if */ - else /* move is already done for us -- just do sanity checks */ - assert(entry_ptr->addr == new_addr); - } /* end if */ - } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */ - - /* Serialize object into buffer */ - if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry") - -#if H5C_DO_MEMORY_SANITY_CHECKS - assert(0 == memcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE)); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - entry_ptr->image_up_to_date = TRUE; - - /* Propagate the fact that the entry is serialized up the - * flush dependency chain if appropriate. Since the image must - * have been out of date for this function to have been called - * (see assertion on entry), no need to check that -- only check - * for flush dependency parents. 
- */ - assert(entry_ptr->flush_dep_nunser_children == 0); + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. + */ + } /* H5C_decr__threshold */ + + if ((config_ptr->decr_mode == H5C_decr__age_out) || + (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { + if (config_ptr->epochs_before_eviction < 1) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive") + if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big") + if (config_ptr->apply_empty_reserve && + (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]") + + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. + */ + } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ + + if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) + if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "upper_hr_threshold must be in the interval [0.0, 1.0]") + } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_serialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents") + if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { + if ((config_ptr->incr_mode == H5C_incr__threshold) && + ((config_ptr->decr_mode == H5C_decr__threshold) || + (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) && + (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config") + } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */ done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__generate_image */ +} /* H5C_validate_resize_config() */ /*------------------------------------------------------------------------- + * Function: H5C_cork * - * Function: H5C_remove_entry - * - * Purpose: Remove an entry from the cache. Must be not protected, pinned, - * dirty, involved in flush dependencies, etc. 
- * - * Return: Non-negative on success/Negative on failure + * Purpose: To cork/uncork/get cork status of an object depending on "action": + * H5C__SET_CORK: + * To cork the object + * Return error if the object is already corked + * H5C__UNCORK: + * To uncork the object + * Return error if the object is not corked + * H5C__GET_CORKED: + * To retrieve the cork status of an object in + * the parameter "corked" * - * Programmer: Quincey Koziol - * September 17, 2016 + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ herr_t -H5C_remove_entry(void *_entry) +H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) { - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */ - H5C_t *cache; /* Cache for file */ - herr_t ret_value = SUCCEED; /* Return value */ + H5C_tag_info_t *tag_info = NULL; + herr_t ret_value = SUCCEED; - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_NOAPI_NOINIT - /* Sanity checks */ - assert(entry); - assert(entry->ring != H5C_RING_UNDEFINED); - cache = entry->cache_ptr; - assert(cache); - assert(cache->magic == H5C__H5C_T_MAGIC); - - /* Check for error conditions */ - if (entry->is_dirty) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache") - if (entry->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache") - if (entry->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache") - /* NOTE: If these two errors are getting tripped because the entry is - * in a flush dependency with a freedspace entry, move the checks - * after the "before evict" message is sent, and add the - * "child being evicted" message to the "before evict" notify - * section below. QAK - 2017/08/03 - */ - if (entry->flush_dep_nparents > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, - "can't remove entry with flush dependency parents from cache") - if (entry->flush_dep_nchildren > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, - "can't remove entry with flush dependency children from cache") - - /* Additional internal cache consistency checks */ - assert(!entry->in_slist); - assert(!entry->flush_marker); - assert(!entry->flush_in_progress); - - /* Note that the algorithm below is (very) similar to the set of operations - * in H5C__flush_single_entry() and should be kept in sync with changes - * to that code. - QAK, 2016/11/30 - */ + /* Assertions */ + assert(cache_ptr != NULL); + assert(H5_addr_defined(obj_addr)); + assert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED); - /* Update stats, as if we are "destroying" and taking ownership of the entry */ - H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE) + /* Search the list of corked object addresses in the cache */ + HASH_FIND(hh, cache_ptr->tag_list, &obj_addr, sizeof(haddr_t), tag_info); - /* If the entry's type has a 'notify' callback, send a 'before eviction' - * notice while the entry is still fully integrated in the cache. - */ - if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") - - /* Update the cache internal data structures as appropriate for a destroy. 
- * Specifically: - * 1) Delete it from the index - * 2) Delete it from the collective read access list - * 3) Update the replacement policy for eviction - * 4) Remove it from the tag list for this object - */ + if (H5C__GET_CORKED == action) { + assert(corked); + if (tag_info != NULL && tag_info->corked) + *corked = TRUE; + else + *corked = FALSE; + } + else { + /* Sanity check */ + assert(H5C__SET_CORK == action || H5C__UNCORK == action); - H5C__DELETE_FROM_INDEX(cache, entry, FAIL) + /* Perform appropriate action */ + if (H5C__SET_CORK == action) { + /* Check if this is the first entry for this tagged object */ + if (NULL == tag_info) { + /* Allocate new tag info struct */ + if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry") -#ifdef H5_HAVE_PARALLEL - /* Check for collective read access flag */ - if (entry->coll_access) { - entry->coll_access = FALSE; - H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + /* Set the tag for all entries */ + tag_info->tag = obj_addr; - H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL) + /* Insert tag info into hash table */ + HASH_ADD(hh, cache_ptr->tag_list, tag, sizeof(haddr_t), tag_info); + } + else { + /* Check for object already corked */ + if (tag_info->corked) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked") + assert(tag_info->entry_cnt > 0 && tag_info->head); + } - /* Remove entry from tag list */ - if (H5C__untag_entry(cache, entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") + /* Set the corked status for the entire object */ + tag_info->corked = TRUE; + cache_ptr->num_objs_corked++; + } + else { + /* Sanity check */ + if (NULL == tag_info) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "tag info pointer is NULL") - /* Increment entries_removed_counter and set last_entry_removed_ptr. - * As we me be about to free the entry, recall that last_entry_removed_ptr - * must NEVER be dereferenced. - * - * Recall that these fields are maintained to allow functions that perform - * scans of lists of entries to detect the unexpected removal of entries - * (via expunge, eviction, or take ownership at present), so that they can - * re-start their scans if necessary. - * - * Also check if the entry we are watching for removal is being - * removed (usually the 'next' entry for an iteration) and reset - * it to indicate that it was removed. - */ - cache->entries_removed_counter++; - cache->last_entry_removed_ptr = entry; - if (entry == cache->entry_watched_for_removal) - cache->entry_watched_for_removal = NULL; + /* Check for already uncorked */ + if (!tag_info->corked) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked") - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. - * - * Now clean up internal cache fields if appropriate. 
-    */
+        /* Set the corked status for the entire object */
+        tag_info->corked = FALSE;
+        cache_ptr->num_objs_corked--;
-    /* Free the buffer for the on disk image */
-    if (entry->image_ptr != NULL)
-        entry->image_ptr = H5MM_xfree(entry->image_ptr);
+        /* Remove the tag info from the tag list, if there are no more entries with this tag */
+        if (0 == tag_info->entry_cnt) {
+            /* Sanity check */
+            assert(NULL == tag_info->head);
-    /* Reset the pointer to the cache the entry is within */
-    entry->cache_ptr = NULL;
+            HASH_DELETE(hh, cache_ptr->tag_list, tag_info);
-    /* Client is taking ownership of the entry. Set bad magic here so the
-     * cache will choke unless the entry is re-inserted properly
-     */
-    entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+            /* Release the tag info */
+            tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
+        }
+        else
+            assert(NULL != tag_info->head);
+    }
+    }
done:
    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__remove_entry() */
+} /* H5C_cork() */
diff --git a/src/H5CS.c b/src/H5CS.c
index bb2a1ea..66ac0d7 100644
--- a/src/H5CS.c
+++ b/src/H5CS.c
@@ -213,7 +213,7 @@ H5CS_pop(void)
     /* Pop the function. */
     fstack->nused--;
-    FUNC_LEAVE_NOAPI_NOFS(SUCCEED);
+    FUNC_LEAVE_NOAPI_NOFS(SUCCEED)
 } /* end H5CS_pop() */
 /*-------------------------------------------------------------------------
diff --git a/src/H5CX.c b/src/H5CX.c
index b2ddbc1..d2bd1ba 100644
--- a/src/H5CX.c
+++ b/src/H5CX.c
@@ -972,7 +972,7 @@ done:
         if (*api_state) {
             /* Release the (possibly partially allocated) API state struct */
             if (H5CX_free_state(*api_state) < 0)
-                HDONE_ERROR(H5E_CONTEXT, H5E_CANTRELEASE, FAIL, "unable to release API state")
+                HDONE_ERROR(H5E_CONTEXT, H5E_CANTRELEASE, FAIL, "unable to release API state");
             *api_state = NULL;
         } /* end if */
     } /* end if */
@@ -1095,7 +1095,7 @@ H5CX_free_state(H5CX_state_t *api_state)
             HGOTO_ERROR(H5E_CONTEXT, H5E_CANTRELEASE, FAIL, "unable to release VOL connector info object")
         /* Decrement connector ID */
         if (H5I_dec_ref(api_state->vol_connector_prop.connector_id) < 0)
-            HDONE_ERROR(H5E_CONTEXT, H5E_CANTDEC, FAIL, "can't close VOL connector ID")
+            HDONE_ERROR(H5E_CONTEXT, H5E_CANTDEC, FAIL, "can't close VOL connector ID");
     } /* end if */
     /* Free the state */
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 0411e8e..19d31f1 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -13,8 +13,6 @@
 /*-------------------------------------------------------------------------
 *
 * Created: H5Cdbg.c
- * July 8 2016
- * Quincey Koziol
 *
 * Purpose: Debugging Routines for the generic cache structure or entries.
* @@ -69,9 +67,6 @@ * * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer - * 10/10/10 - * *------------------------------------------------------------------------- */ herr_t @@ -86,7 +81,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) /* Sanity check */ assert(cache_ptr != NULL); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_name != NULL); /* First, create a skip list */ @@ -101,7 +95,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) entry_ptr = cache_ptr->index[i]; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (H5SL_insert(slist_ptr, entry_ptr, &(entry_ptr->addr)) < 0) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't insert entry in skip list") @@ -131,8 +124,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) i = 0; entry_ptr = (H5C_cache_entry_t *)H5SL_remove_first(slist_ptr); while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Print entry */ fprintf(stdout, "%s%5d ", cache_ptr->prefix, i); fprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr)); @@ -178,9 +169,6 @@ done: * * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer - * 10/10/10 - * *------------------------------------------------------------------------- */ herr_t @@ -193,7 +181,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) /* Sanity check */ assert(cache_ptr != NULL); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_name != NULL); fprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name); @@ -218,8 +205,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Print entry */ fprintf(stdout, "%s%5d ", cache_ptr->prefix, i); fprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr)); @@ -247,7 +232,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_dump_cache_skip_list * * Purpose: Debugging routine that prints a summary of the contents of @@ -256,9 +240,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) * * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer - * 11/15/14 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -273,7 +254,6 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) FUNC_ENTER_NOAPI_NOERR assert(cache_ptr != NULL); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(calling_fcn != NULL); fprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn); @@ -297,7 +277,6 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) entry_ptr = NULL; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); fprintf(stdout, "%s%d 0x%016llx %4lld %d/%d %d %s\n", cache_ptr->prefix, i, (long long)(entry_ptr->addr), (long long)(entry_ptr->size), (int)(entry_ptr->is_protected), (int)(entry_ptr->is_pinned), (int)(entry_ptr->is_dirty), @@ -329,9 +308,6 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) * * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer - * 1/20/06 - * *------------------------------------------------------------------------- */ herr_t @@ -341,8 +317,7 @@ H5C_set_prefix(H5C_t 
*cache_ptr, char *prefix) FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || (prefix == NULL) || - (HDstrlen(prefix) >= H5C__PREFIX_LEN)) + if (cache_ptr == NULL || prefix == NULL || HDstrlen(prefix) >= H5C__PREFIX_LEN) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry") HDstrncpy(&(cache_ptr->prefix[0]), prefix, (size_t)(H5C__PREFIX_LEN)); @@ -360,9 +335,6 @@ done: * * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer - * 6/2/04 - * *------------------------------------------------------------------------- */ herr_t @@ -416,12 +388,7 @@ H5C_stats(H5C_t *cache_ptr, const char *cache_name, FUNC_ENTER_NOAPI(FAIL) - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* This would normally be an assert, but we need to use an HGOTO_ERROR - * call to shut up the compiler. - */ - if ((NULL == cache_ptr) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || (NULL == cache_name)) + if (NULL == cache_ptr || NULL == cache_name) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or cache_name") #if H5C_COLLECT_CACHE_STATS @@ -694,15 +661,12 @@ done: } /* H5C_stats() */ /*------------------------------------------------------------------------- - * * Function: H5C_stats__reset * * Purpose: Reset the stats fields to their initial values. * * Return: void * - * Programmer: John Mainzer, 4/28/04 - * *------------------------------------------------------------------------- */ void @@ -710,7 +674,7 @@ void H5C_stats__reset(H5C_t *cache_ptr) #else /* NDEBUG */ #if H5C_COLLECT_CACHE_STATS -H5C_stats__reset(H5C_t *cache_ptr) +H5C_stats__reset(H5C_t *cache_ptr) #else /* H5C_COLLECT_CACHE_STATS */ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ @@ -721,7 +685,6 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); #if H5C_COLLECT_CACHE_STATS for (i = 0; i <= cache_ptr->max_type_id; i++) { @@ -827,9 +790,6 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) * Return: SUCCEED on success/FAIL on failure. Note that * *fd_exists_ptr is undefined on failure. * - * Programmer: John Mainzer - * 9/28/16 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -845,18 +805,14 @@ H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(H5_addr_defined(parent_addr)); assert(H5_addr_defined(child_addr)); assert(fd_exists_ptr); - H5C__SEARCH_INDEX(cache_ptr, parent_addr, parent_ptr, FAIL) - H5C__SEARCH_INDEX(cache_ptr, child_addr, child_ptr, FAIL) + H5C__SEARCH_INDEX(cache_ptr, parent_addr, parent_ptr, FAIL); + H5C__SEARCH_INDEX(cache_ptr, child_addr, child_ptr, FAIL); if (parent_ptr && child_ptr) { - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(child_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (child_ptr->flush_dep_nparents > 0) { unsigned u; /* Local index variable */ @@ -881,7 +837,6 @@ done: #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_validate_index_list * * Purpose: Debugging function that scans the index list for errors. @@ -892,8 +847,6 @@ done: * * Return: FAIL if error is detected, SUCCEED otherwise. 
* - * Programmer: John Mainzer, 9/16/16 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -916,7 +869,6 @@ H5C_validate_index_list(H5C_t *cache_ptr) /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); for (i = 0; i < H5C_RING_NTYPES; i++) { index_ring_len[i] = 0; @@ -997,7 +949,6 @@ done: #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_get_entry_ptr_from_addr() * * Purpose: Debugging function that attempts to look up an entry in the @@ -1028,8 +979,6 @@ done: * * Return: FAIL if error is detected, SUCCEED otherwise. * - * Programmer: John Mainzer, 5/30/14 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -1043,11 +992,10 @@ H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr, void **entry_ptr_ptr /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(H5_addr_defined(addr)); assert(entry_ptr_ptr); - H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) + H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL); if (entry_ptr == NULL) /* the entry doesn't exist in the cache -- report this @@ -1074,9 +1022,6 @@ done: * * Return: Current value of cache_ptr->serialization_in_progress. * - * Programmer: John Mainzer - * 8/24/15 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -1087,14 +1032,12 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr) /* Sanity check */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); FUNC_LEAVE_NOAPI(cache_ptr->serialization_in_progress) } /* H5C_get_serialization_in_progress() */ #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_cache_is_clean() * * Purpose: Debugging function that verifies that all rings in the @@ -1106,8 +1049,6 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr) * * Return: TRUE if the indicated ring(s) are clean, and FALSE otherwise. * - * Programmer: John Mainzer, 6/18/16 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG @@ -1121,13 +1062,12 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring) /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(inner_ring >= H5C_RING_USER); assert(inner_ring <= H5C_RING_SB); while (ring <= inner_ring) { if (cache_ptr->dirty_index_ring_size[ring] > 0) - HGOTO_DONE(FALSE) + HGOTO_DONE(FALSE); ring++; } /* end while */ @@ -1138,7 +1078,6 @@ done: #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_verify_entry_type() * * Purpose: Debugging function that attempts to look up an entry in the @@ -1158,8 +1097,6 @@ done: * * Return: FAIL if error is detected, SUCCEED otherwise. 
 *
- * Programmer: John Mainzer, 5/30/14
- *
 *-------------------------------------------------------------------------
 */
#ifndef NDEBUG
@@ -1174,13 +1111,12 @@ H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr, const H5C_class_t *expecte
     /* Sanity checks */
     assert(cache_ptr);
-    assert(cache_ptr->magic == H5C__H5C_T_MAGIC);
     assert(H5_addr_defined(addr));
     assert(expected_type);
     assert(in_cache_ptr);
     assert(type_ok_ptr);
-    H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+    H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL);
     if (entry_ptr == NULL)
         /* the entry doesn't exist in the cache -- report this
@@ -1200,3 +1136,457 @@ done:
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5C_verify_entry_type() */
 #endif /* NDEBUG */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_def_auto_resize_rpt_fcn
+ *
+ * Purpose: Print results of an automatic cache resize.
+ *
+ * This function should only be used where printf() behaves
+ * well -- i.e. not on Windows.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
+#ifndef NDEBUG
+ int32_t version,
+#else
+ int32_t H5_ATTR_UNUSED version,
+#endif
+ double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
+ size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size)
+{
+ assert(cache_ptr != NULL);
+ assert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER);
+
+ switch (status) {
+ case in_spec:
+ fprintf(stdout, "%sAuto cache resize -- no change. (hit rate = %lf)\n", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case increase:
+ assert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold);
+ assert(old_max_cache_size < new_max_cache_size);
+
+ fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
+ fprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
+ old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
+ break;
+
+ case flash_increase:
+ assert(old_max_cache_size < new_max_cache_size);
+
+ fprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix,
+ (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold);
+ fprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
+ old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
+ break;
+
+ case decrease:
+ assert(old_max_cache_size > new_max_cache_size);
+
+ switch (cache_ptr->resize_ctl.decr_mode) {
+ case H5C_decr__off:
+ fprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case H5C_decr__threshold:
+ assert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
+
+ fprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold);
+ fprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix,
+ cache_ptr->resize_ctl.upper_hr_threshold);
+ break;
+
+ case H5C_decr__age_out:
+ fprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
+ cache_ptr->prefix, hit_rate);
+ break;
+
+ case H5C_decr__age_out_with_threshold:
+ assert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
+
+ fprintf(stdout,
+ "%sAuto cache resize -- decrease by ageout with threshold. 
HR = %lf > %6.5lf\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); + break; + + default: + fprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n", + cache_ptr->prefix, hit_rate); + } + + fprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, + old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); + break; + + case at_max_size: + fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); + fprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix); + break; + + case at_min_size: + fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix, + hit_rate); + fprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix); + break; + + case increase_disabled: + fprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix, + hit_rate); + break; + + case decrease_disabled: + fprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix, + hit_rate); + break; + + case not_full: + assert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); + + fprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); + fprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix); + break; + + default: + fprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix); + break; + } +} /* H5C_def_auto_resize_rpt_fcn() */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_lru_list + * + * Purpose: Debugging function that scans the LRU list for errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
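+ *
+ * Note: The checks cover head/tail pointer consistency, prev/next
+ * linkage, the absence of pinned entries on the LRU, and
+ * agreement of the cached LRU_list_len / LRU_list_size
+ * totals with the entries actually found on the list.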
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_EXTREME_SANITY_CHECKS +herr_t +H5C__validate_lru_list(H5C_t *cache_ptr) +{ + int32_t len = 0; + size_t size = 0; + H5C_cache_entry_t *entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) && + (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed") + + if ((cache_ptr->LRU_list_len == 1) && + ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) || + (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") + + if ((cache_ptr->LRU_list_len >= 1) && + ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) || + (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") + + entry_ptr = cache_ptr->LRU_head_ptr; + while (entry_ptr != NULL) { + if ((entry_ptr != cache_ptr->LRU_head_ptr) && + ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if ((entry_ptr != cache_ptr->LRU_tail_ptr) && + ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") + + len++; + size += entry_ptr->size; + entry_ptr = entry_ptr->next; + } + + if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed") + +done: + if (ret_value != SUCCEED) + assert(0); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__validate_lru_list() */ +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_pinned_entry_list + * + * Purpose: Debugging function that scans the pinned entry list for + * errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
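+ *
+ * Note: The checks mirror those of H5C__validate_lru_list(), and
+ * additionally verify that every entry on the list is pinned
+ * and records a valid pin origin (pinned_from_client and/or
+ * pinned_from_cache).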
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_EXTREME_SANITY_CHECKS +herr_t +H5C__validate_pinned_entry_list(H5C_t *cache_ptr) +{ + int32_t len = 0; + size_t size = 0; + H5C_cache_entry_t *entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) && + (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed") + + if ((cache_ptr->pel_len == 1) && + ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) || + (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") + + if ((cache_ptr->pel_len >= 1) && + ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) || + (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") + + entry_ptr = cache_ptr->pel_head_ptr; + while (entry_ptr != NULL) { + if ((entry_ptr != cache_ptr->pel_head_ptr) && + ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if ((entry_ptr != cache_ptr->pel_tail_ptr) && + ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if (!entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry") + + if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") + + len++; + size += entry_ptr->size; + entry_ptr = entry_ptr->next; + } + + if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed") + +done: + if (ret_value != SUCCEED) + assert(0); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__validate_pinned_entry_list() */ +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_protected_entry_list + * + * Purpose: Debugging function that scans the protected entry list for + * errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
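+ *
+ * Note: The checks mirror those of the other list validators, and
+ * additionally verify that every entry on the list is
+ * protected and that read-only entries carry a positive
+ * ro_ref_count.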
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+herr_t
+H5C__validate_protected_entry_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ assert(cache_ptr);
+
+ if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
+ (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed")
+
+ if ((cache_ptr->pl_len == 1) &&
+ ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
+ (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
+
+ if ((cache_ptr->pl_len >= 1) &&
+ ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
+ (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
+
+ entry_ptr = cache_ptr->pl_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->pl_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if (!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry")
+
+ if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count")
+
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
+
+ if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed")
+
+done:
+ if (ret_value != SUCCEED)
+ assert(0);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_protected_entry_list() */
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__entry_in_skip_list
+ *
+ * Purpose: Debugging function that scans the skip list to see if the
+ * target entry is present. We need this, as it is possible for
+ * an entry to be in the skip list twice.
+ *
+ * Return: FALSE if the entry is not in the skip list, and TRUE
+ * if it is.
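+ *
+ * Note: This is a linear scan of the entire skip list, so it is
+ * only suitable for the sanity-checking builds in which
+ * H5C_DO_SLIST_SANITY_CHECKS is defined.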
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_SLIST_SANITY_CHECKS +hbool_t +H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) +{ + H5SL_node_t *node_ptr; + hbool_t in_slist; + hbool_t ret_value; + + FUNC_ENTER_PACKAGE + + /* Assertions */ + assert(cache_ptr); + assert(cache_ptr->slist_ptr); + + node_ptr = H5SL_first(cache_ptr->slist_ptr); + in_slist = FALSE; + while ((node_ptr != NULL) && (!in_slist)) { + H5C_cache_entry_t *entry_ptr; + + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + + assert(entry_ptr); + assert(entry_ptr->is_dirty); + assert(entry_ptr->in_slist); + + if (entry_ptr == target_ptr) + in_slist = TRUE; + else + node_ptr = H5SL_next(node_ptr); + } + + /* Set return value */ + ret_value = in_slist; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__entry_in_skip_list() */ +#endif /* H5C_DO_SLIST_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__image_stats + * + * Purpose: Prints statistics specific to the cache image. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +#if H5C_COLLECT_CACHE_STATS +H5C__image_stats(H5C_t *cache_ptr, hbool_t print_header) +#else /* H5C_COLLECT_CACHE_STATS */ +H5C__image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header) +#endif /* H5C_COLLECT_CACHE_STATS */ +{ +#if H5C_COLLECT_CACHE_STATS + int i; + int64_t total_hits = 0; + int64_t total_misses = 0; + double hit_rate; + double prefetch_use_rate; +#endif /* H5C_COLLECT_CACHE_STATS */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + if (NULL == cache_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") + +#if H5C_COLLECT_CACHE_STATS + for (i = 0; i <= cache_ptr->max_type_id; i++) { + total_hits += cache_ptr->hits[i]; + total_misses += cache_ptr->misses[i]; + } /* end for */ + + if ((total_hits > 0) || (total_misses > 0)) + hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses)); + else + hit_rate = 0.0; + + if (cache_ptr->prefetches > 0) + prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches)); + else + prefetch_use_rate = 0.0; + + if (print_header) { + fprintf(stdout, "\nhit prefetches prefetch image pf hit\n"); + fprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n"); + } /* end if */ + + fprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate, + (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches), + (long long)(cache_ptr->prefetch_hits), (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]), + (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]), + (long long)(cache_ptr->last_image_size), prefetch_use_rate); +#endif /* H5C_COLLECT_CACHE_STATS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__image_stats() */ diff --git a/src/H5Centry.c b/src/H5Centry.c new file mode 100644 index 0000000..18f9689 --- /dev/null +++ b/src/H5Centry.c @@ -0,0 +1,4214 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5Centry.c + * + * Purpose: Routines which operate on cache entries. + * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Cmodule.h" /* This source code file is part of the H5C module */ +#define H5F_FRIEND /* suppress error about including H5Fpkg */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Cpkg.h" /* Cache */ +#include "H5CXprivate.h" /* API Contexts */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fpkg.h" /* Files */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5MMprivate.h" /* Memory management */ + +/****************/ +/* Local Macros */ +/****************/ +#if H5C_DO_MEMORY_SANITY_CHECKS +#define H5C_IMAGE_EXTRA_SPACE 8 +#define H5C_IMAGE_SANITY_VALUE "DeadBeef" +#else /* H5C_DO_MEMORY_SANITY_CHECKS */ +#define H5C_IMAGE_EXTRA_SPACE 0 +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + +/******************/ +/* Local Typedefs */ +/******************/ + +/* Alias for pointer to cache entry, for use when allocating sequences of them */ +typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t; + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); +static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); +static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); +static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); +static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, + hbool_t actual); +static void *H5C__load_entry(H5F_t *f, +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access, +#endif /* H5_HAVE_PARALLEL */ + const H5C_class_t *type, haddr_t addr, void *udata); +static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry); +#ifndef NDEBUG +static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, + const H5C_cache_entry_t *base_entry); +#endif +static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, + H5C_cache_entry_t **fd_children); +static herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, + const H5C_class_t *type, haddr_t addr, void *udata); + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/* Declare a free list to manage arrays of cache entries 
*/ +H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); + +/*------------------------------------------------------------------------- + * Function: H5C__pin_entry_from_client() + * + * Purpose: Internal routine to pin a cache entry from a client action. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__pin_entry_from_client(H5C_t +#if !H5C_COLLECT_CACHE_STATS + H5_ATTR_UNUSED +#endif + *cache_ptr, + H5C_cache_entry_t *entry_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(cache_ptr); + assert(entry_ptr); + assert(entry_ptr->is_protected); + + /* Check if the entry is already pinned */ + if (entry_ptr->is_pinned) { + /* Check if the entry was pinned through an explicit pin from a client */ + if (entry_ptr->pinned_from_client) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned") + } /* end if */ + else { + entry_ptr->is_pinned = TRUE; + + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr); + } /* end else */ + + /* Mark that the entry was pinned through an explicit pin from a client */ + entry_ptr->pinned_from_client = TRUE; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__pin_entry_from_client() */ + +/*------------------------------------------------------------------------- + * Function: H5C__unpin_entry_real() + * + * Purpose: Internal routine to unpin a cache entry. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) +{ + herr_t ret_value = SUCCEED; /* Return value */ + +#ifdef H5C_DO_SANITY_CHECKS + FUNC_ENTER_PACKAGE +#else + FUNC_ENTER_PACKAGE_NOERR +#endif + + /* Sanity checking */ + assert(cache_ptr); + assert(entry_ptr); + assert(entry_ptr->is_pinned); + + /* If requested, update the replacement policy if the entry is not protected */ + if (update_rp && !entry_ptr->is_protected) + H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL); + + /* Unpin the entry now */ + entry_ptr->is_pinned = FALSE; + + /* Update the stats for an unpin operation */ + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr); + +#ifdef H5C_DO_SANITY_CHECKS +done: +#endif + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__unpin_entry_real() */ + +/*------------------------------------------------------------------------- + * Function: H5C__unpin_entry_from_client() + * + * Purpose: Internal routine to unpin a cache entry from a client action. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checking */ + assert(cache_ptr); + assert(entry_ptr); + + /* Error checking (should be sanity checks?) 
+ */
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
+ if (!entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
+
+ /* Check if the entry is not pinned from a flush dependency */
+ if (!entry_ptr->pinned_from_cache)
+ if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
+
+ /* Mark the entry as explicitly unpinned by the client */
+ entry_ptr->pinned_from_client = FALSE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_from_client() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_image
+ *
+ * Purpose: Serialize an entry and generate its image.
+ *
+ * Note: This may cause the entry to be re-sized and/or moved in
+ * the cache.
+ *
+ * As we will not update the metadata cache's data structures
+ * until we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
+ * flush destroy).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ assert(f);
+ assert(cache_ptr);
+ assert(entry_ptr);
+ assert(!entry_ptr->image_up_to_date);
+ assert(entry_ptr->is_dirty);
+ assert(!entry_ptr->is_protected);
+ assert(entry_ptr->type);
+
+ /* make note of the entry's current address */
+ old_addr = entry_ptr->addr;
+
+ /* Call client's pre-serialize callback, if there's one */
+ if ((entry_ptr->type->pre_serialize) &&
+ ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
+ &new_len, &serialize_flags) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
+
+ /* Check for any flags set in the pre-serialize callback */
+ if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
+ /* Check for unexpected flags from serialize callback */
+ if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+
+#ifdef H5_HAVE_PARALLEL
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+ * NB: While this test detects entries that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ */
+ if (cache_ptr->aux_ptr != NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+#endif
+
+ /* If required, resize the buffer and update the entry and the cache
+ * data structures
+ */
+ if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ /* Sanity check */
+ assert(new_len > 0);
+
+ /* Allocate a new image buffer */
+ if (NULL ==
+ (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* Update statistics for resizing the entry */
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+
+ /* Update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
+ !entry_ptr->is_dirty, FAIL);
+
+ /* The entry can't be protected since we are in the process of
+ * flushing it. Thus we must update the replacement policy data
+ * structures for the size change. The macro deals with the pinned
+ * case.
+ */
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL);
+
+ /* As we haven't updated the cache data structures
+ * for the flush or flush destroy yet, the entry should
+ * be in the slist if the slist is enabled. Since
+ * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
+ * slist is disabled, call it unconditionally.
+ */
+ assert(entry_ptr->is_dirty);
+ assert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
+
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
+
+ /* Finally, update the entry for its new size */
+ entry_ptr->size = new_len;
+ } /* end if */
+
+ /* If required, update the entry and the cache data structures
+ * for a move
+ */
+ if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
+ /* Update stats and entries relocated counter */
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr);
+
+ /* We must update cache data structures for the change in address */
+ if (entry_ptr->addr == old_addr) {
+ /* Delete the entry from the hash table and the slist */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL);
+
+ /* Update the entry for its new address */
+ entry_ptr->addr = new_addr;
+
+ /* And then reinsert in the index and slist */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+ } /* end if */
+ else /* move is already done for us -- just do sanity checks */
+ assert(entry_ptr->addr == new_addr);
+ } /* end if */
+ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
+
+ /* Serialize object into buffer */
+ if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ assert(0 == memcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE));
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ entry_ptr->image_up_to_date = TRUE;
+
+ /* Propagate the fact that the entry is serialized up the
+ * flush dependency chain if appropriate. 
Since the image must + * have been out of date for this function to have been called + * (see assertion on entry), no need to check that -- only check + * for flush dependency parents. + */ + assert(entry_ptr->flush_dep_nunser_children == 0); + + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_serialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__generate_image */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_single_entry + * + * Purpose: Flush or clear (and evict if requested) the cache entry + * with the specified address and type. If the type is NULL, + * any unprotected entry at the specified address will be + * flushed (and possibly evicted). + * + * Attempts to flush a protected entry will result in an + * error. + * + * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will + * be cleared and not flushed, and the call can't be part of a + * sequence of flushes. + * + * The function does nothing silently if there is no entry + * at the supplied address, or if the entry found has the + * wrong type. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) +{ + H5C_t *cache_ptr; /* Cache for file */ + hbool_t destroy; /* external flag */ + hbool_t clear_only; /* external flag */ + hbool_t free_file_space; /* external flag */ + hbool_t take_ownership; /* external flag */ + hbool_t del_from_slist_on_destroy; /* external flag */ + hbool_t during_flush; /* external flag */ + hbool_t write_entry; /* internal flag */ + hbool_t destroy_entry; /* internal flag */ + hbool_t generate_image; /* internal flag */ + hbool_t update_page_buffer; /* internal flag */ + hbool_t was_dirty; + hbool_t suppress_image_entry_writes = FALSE; + hbool_t suppress_image_entry_frees = FALSE; + haddr_t entry_addr = HADDR_UNDEF; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(f); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(entry_ptr); + assert(entry_ptr->ring != H5C_RING_UNDEFINED); + assert(entry_ptr->type); + + /* setup external flags from the flags parameter */ + destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); + clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); + free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); + take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); + del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0); + during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0); + generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0); + update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0); + + /* Set the flag for destroying the entry, based on the 'take ownership' + * and 'destroy' flags + */ + if (take_ownership) + destroy_entry = FALSE; + else + destroy_entry = destroy; + + /* we will write the entry to disk if it exists, is dirty, and if the + * clear only flag is not set. 
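+ * (A clear-only call thus never writes the entry's image to
+ * the file.)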
+ */ + if (entry_ptr->is_dirty && !clear_only) + write_entry = TRUE; + else + write_entry = FALSE; + + /* if we have received a close warning, and we have been instructed to + * generate a metadata cache image, and we have actually constructed + * the entry images, set suppress_image_entry_frees to TRUE. + * + * Set suppress_image_entry_writes to TRUE if indicated by the + * image_ctl flags. + */ + if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image && + cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) { + + /* Sanity checks */ + assert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image)); + assert(entry_ptr->image_ptr || !(entry_ptr->include_in_image)); + assert((!clear_only) || !(entry_ptr->include_in_image)); + assert((!take_ownership) || !(entry_ptr->include_in_image)); + assert((!free_file_space) || !(entry_ptr->include_in_image)); + + suppress_image_entry_frees = TRUE; + + if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) + suppress_image_entry_writes = TRUE; + } /* end if */ + + /* run initial sanity checks */ +#ifdef H5C_DO_SANITY_CHECKS + if (cache_ptr->slist_enabled) { + if (entry_ptr->in_slist) { + assert(entry_ptr->is_dirty); + if (entry_ptr->flush_marker && !entry_ptr->is_dirty) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks") + } /* end if */ + else { + assert(!entry_ptr->is_dirty); + assert(!entry_ptr->flush_marker); + if (entry_ptr->is_dirty || entry_ptr->flush_marker) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks") + } /* end else */ + } + else { /* slist is disabled */ + assert(!entry_ptr->in_slist); + if (!entry_ptr->is_dirty) + if (entry_ptr->flush_marker) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?") + } +#endif /* H5C_DO_SANITY_CHECKS */ + + if (entry_ptr->is_protected) + /* Attempt to flush a protected entry -- scream and die. */ + HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry") + + /* Set entry_ptr->flush_in_progress = TRUE and set + * entry_ptr->flush_marker = FALSE + * + * We will set flush_in_progress back to FALSE at the end if the + * entry still exists at that point. + */ + entry_ptr->flush_in_progress = TRUE; + entry_ptr->flush_marker = FALSE; + + /* Preserve current dirty state for later */ + was_dirty = entry_ptr->is_dirty; + + /* The entry is dirty, and we are doing a flush, a flush destroy, or have + * been requested to generate an image. In those cases, serialize the + * entry. + */ + if (write_entry || generate_image) { + assert(entry_ptr->is_dirty); + if (NULL == entry_ptr->image_ptr) { + if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, + "memory allocation failed for on disk image buffer") + +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + } /* end if */ + + if (!entry_ptr->image_up_to_date) { + /* Sanity check */ + assert(!entry_ptr->prefetched); + + /* Generate the entry's image */ + if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image") + } /* end if ( ! (entry_ptr->image_up_to_date) ) */ + } /* end if */ + + /* Finally, write the image to disk.
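+ * (Roughly: the image was (re)generated above whenever the entry was dirty, + * and the block below writes it out only when write_entry is set -- i.e. + * the entry is dirty and H5C__FLUSH_CLEAR_ONLY_FLAG was not supplied.)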
+ * + * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the + * entry's type, we silently skip the write. This + * flag should only be used in test code. + */ + if (write_entry) { + assert(entry_ptr->is_dirty); + +#ifdef H5C_DO_SANITY_CHECKS + if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!") +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Write the image to disk unless the write is suppressed. + * + * The write is suppressed if both suppress_image_entry_writes and + * entry_ptr->include_in_image are TRUE, or if the + * H5AC__CLASS_SKIP_WRITES flag is set in the entry's type. This + * flag should only be used in test code. + */ + if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) && + ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) { + H5FD_mem_t mem_type = H5FD_MEM_DEFAULT; + +#ifdef H5_HAVE_PARALLEL + if (cache_ptr->coll_write_list) { + if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item") + } /* end if */ + else { +#endif /* H5_HAVE_PARALLEL */ + if (entry_ptr->prefetched) { + assert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type; + } /* end if */ + else + mem_type = entry_ptr->type->mem_type; + + if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") +#ifdef H5_HAVE_PARALLEL + } +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ + + /* if the entry has a notify callback, notify it that we have + * just flushed the entry. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush") + } /* if ( write_entry ) */ + + /* At this point, all pre-serialize and serialize calls have been + * made if it was appropriate to make them. Similarly, the entry + * has been written to disk if desired. + * + * Thus it is now safe to update the cache data structures for the + * flush. + */ + + /* start by updating the statistics */ + if (clear_only) { + /* only log a clear if the entry was dirty */ + if (was_dirty) + H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr); + } + else if (write_entry) { + assert(was_dirty); + + /* only log a flush if we actually wrote to disk */ + H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr); + } /* end else if */ + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C_remove_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + + /* Update the cache internal data structures. */ + if (destroy) { + /* Sanity checks */ + if (take_ownership) + assert(!destroy_entry); + else + assert(destroy_entry); + + assert(!entry_ptr->is_pinned); + + /* Update stats, while entry is still in the cache */ + H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership); + + /* If the entry's type has a 'notify' callback and the entry is about + * to be removed from the cache, send a 'before eviction' notice while + * the entry is still fully integrated in the cache.
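+ * + * (From the client's perspective the eviction sequence below is, roughly: + * notify(BEFORE_EVICT) -> removal from index / slist / replacement policy / + * tag list -> optional file space free -> free_icr() or transfer of + * ownership.)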
+ */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") + + /* Update the cache internal data structures as appropriate + * for a destroy. Specifically: + * + * 1) Delete it from the index + * + * 2) Delete it from the skip list if requested. + * + * 3) Delete it from the collective read access list. + * + * 4) Update the replacement policy for eviction + * + * 5) Remove it from the tag list for this object + * + * Finally, if the destroy_entry flag is set, discard the + * entry. + */ + H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); + + if (entry_ptr->in_slist && del_from_slist_on_destroy) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL); + +#ifdef H5_HAVE_PARALLEL + /* Check for collective read access flag */ + if (entry_ptr->coll_access) { + entry_ptr->coll_access = FALSE; + H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL); + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL); + + /* Remove entry from tag list */ + if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") + + /* verify that the entry is no longer part of any flush dependencies */ + assert(entry_ptr->flush_dep_nparents == 0); + assert(entry_ptr->flush_dep_nchildren == 0); + } /* end if */ + else { + assert(clear_only || write_entry); + assert(entry_ptr->is_dirty); + assert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); + + /* We are either doing a flush or a clear. + * + * A clear and a flush are the same from the point of + * view of the replacement policy and the slist. + * Hence no differentiation between them. + */ + H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL); + + /* mark the entry as clean and update the index + * accordingly. Also, call the clear callback + * if defined. + */ + entry_ptr->is_dirty = FALSE; + + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); + + /* Check for entry changing status and do notifications, etc. */ + if (was_dirty) { + /* If the entry's type has a 'notify' callback send an + * 'entry cleaned' notice now that the entry is fully + * integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + + /* Propagate the clean flag up the flush dependency chain + * if appropriate + */ + if (entry_ptr->flush_dep_ndirty_children != 0) + assert(entry_ptr->flush_dep_ndirty_children == 0); + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag") + } /* end if */ + } /* end else */ + + /* reset the flush_in_progress flag */ + entry_ptr->flush_in_progress = FALSE; + + /* capture the cache entry address for the log_flush call at the + * end before the entry_ptr gets freed + */ + entry_addr = entry_ptr->addr; + + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. + * + * Now discard the entry if appropriate.
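+ * + * (Informally: the image buffer is freed unless it has been handed off to + * the cache image code, file space is optionally released via the fsf_size + * callback or entry_ptr->size, and the in-core representation is freed with + * free_icr() unless the caller took ownership.)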
+ */ + if (destroy) { + /* Sanity check */ + assert(0 == entry_ptr->flush_dep_nparents); + + /* if both suppress_image_entry_frees and entry_ptr->include_in_image + * are true, simply set entry_ptr->image_ptr to NULL, as we have + * another pointer to the buffer in an instance of H5C_image_entry_t + * in cache_ptr->image_entries. + * + * Otherwise, free the buffer if it exists. + */ + if (suppress_image_entry_frees && entry_ptr->include_in_image) + entry_ptr->image_ptr = NULL; + else if (entry_ptr->image_ptr != NULL) + entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); + + /* If the entry is not a prefetched entry, verify that the flush + * dependency parents addresses array has been transferred. + * + * If the entry is prefetched, the free_icr routine will dispose of + * the flush dependency parents addresses array if necessary. + */ + if (!entry_ptr->prefetched) { + assert(0 == entry_ptr->fd_parent_count); + assert(NULL == entry_ptr->fd_parent_addrs); + } /* end if */ + + /* Check whether we should free the space in the file that + * the entry occupies + */ + if (free_file_space) { + hsize_t fsf_size; + + /* Sanity checks */ + assert(H5_addr_defined(entry_ptr->addr)); + assert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr)); +#ifndef NDEBUG + { + size_t curr_len; + + /* Get the actual image size for the thing again */ + entry_ptr->type->image_len((void *)entry_ptr, &curr_len); + assert(curr_len == entry_ptr->size); + } +#endif + + /* If the file space free size callback is defined, use + * it to get the size of the block of file space to free. + * Otherwise use entry_ptr->size. + */ + if (entry_ptr->type->fsf_size) { + if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size") + } /* end if */ + else /* no file space free size callback -- use entry size */ + fsf_size = entry_ptr->size; + + /* Release the space on disk */ + if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry") + } /* end if ( free_file_space ) */ + + /* Reset the pointer to the cache the entry is within. -QAK */ + entry_ptr->cache_ptr = NULL; + + /* increment entries_removed_counter and set + * last_entry_removed_ptr. As we are likely about to + * free the entry, recall that last_entry_removed_ptr + * must NEVER be dereferenced. + * + * Recall that these fields are maintained to allow functions + * that perform scans of lists of entries to detect the + * unexpected removal of entries (via expunge, eviction, + * or take ownership at present), so that they can re-start + * their scans if necessary. + * + * Also check if the entry we are watching for removal is being + * removed (usually the 'next' entry for an iteration) and reset + * it to indicate that it was removed. + */ + cache_ptr->entries_removed_counter++; + cache_ptr->last_entry_removed_ptr = entry_ptr; + + if (entry_ptr == cache_ptr->entry_watched_for_removal) + cache_ptr->entry_watched_for_removal = NULL; + + /* Check for actually destroying the entry in memory */ + /* (As opposed to taking ownership of it) */ + if (destroy_entry) { + if (entry_ptr->is_dirty) { + /* Reset dirty flag */ + entry_ptr->is_dirty = FALSE; + + /* If the entry's type has a 'notify' callback send an + * 'entry cleaned' notice now that the entry is fully + * integrated into the cache.
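+ * + * (Note that the destroy path above never cleared is_dirty, so this + * reset -- and the matching ENTRY_CLEANED notice below -- fires both for + * entries that were written out earlier in this call and for clear-only + * invalidations.)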
+ */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + } /* end if */ + + /* verify that the image has been freed */ + assert(entry_ptr->image_ptr == NULL); + + if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") + } /* end if */ + else { + assert(take_ownership); + } /* end else */ + } /* if (destroy) */ + + /* Check if we have to update the page buffer with cleared entries + * so it doesn't go out of date + */ + if (update_page_buffer) { + /* Sanity check */ + assert(!destroy); + assert(entry_ptr->image_ptr); + + if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size)) + if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, + entry_ptr->image_ptr) > 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache") + } /* end if */ + + if (cache_ptr->log_flush) + if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed") + +done: + assert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress)); + assert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_single_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C__verify_len_eoa + * + * Purpose: Verify that 'len' does not exceed eoa when 'actual' is + * false, i.e. 'len' is the initial speculative length from the + * get_load_size callback with a null image pointer. + * If it does, adjust 'len' accordingly. + * + * Verify that 'len' does not exceed eoa when 'actual' is + * true, i.e. 'len' is the actual length from the get_load_size + * callback with a non-null image pointer. + * If it does, return an error. + * + * Return: FAIL if error is detected, SUCCEED otherwise. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual) +{ + H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */ + haddr_t eoa; /* End-of-allocation in the file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces + * type to H5FD_MEM_DRAW via its call to H5F__accum_read(). + * Thus we do the same for purposes of computing the EOA + * for sanity checks. + */ + cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ?
H5FD_MEM_DRAW : type->mem_type; + + /* Get the file's end-of-allocation value */ + eoa = H5F_get_eoa(f, cooked_type); + if (!H5_addr_defined(eoa)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file") + + /* Check for bad address in general */ + if (H5_addr_gt(addr, eoa)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation") + + /* Check if the amount of data to read will be past the EOA */ + if (H5_addr_gt((addr + *len), eoa)) { + if (actual) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA") + else + /* Trim down the length of the metadata */ + *len = (size_t)(eoa - addr); + } /* end if */ + + if (*len <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__verify_len_eoa() */ + +/*------------------------------------------------------------------------- + * Function: H5C__load_entry + * + * Purpose: Attempt to load the entry at the specified disk address + * and with the specified type into memory. If successful, + * return the in-memory address of the entry. Return NULL + * on failure. + * + * Note that this function simply loads the entry into + * core. It does not insert it into the cache. + * + * Return: Non-NULL on success / NULL on failure. + * + *------------------------------------------------------------------------- + */ +void * +H5C__load_entry(H5F_t *f, +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access, +#endif /* H5_HAVE_PARALLEL */ + const H5C_class_t *type, haddr_t addr, void *udata) +{ + hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */ + uint8_t *image = NULL; /* Buffer for disk image */ + void *thing = NULL; /* Pointer to thing loaded */ + H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */ + size_t len; /* Size of image in file */ +#ifdef H5_HAVE_PARALLEL + int mpi_rank = 0; /* MPI process rank */ + MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */ + int mpi_code; /* MPI error code */ +#endif /* H5_HAVE_PARALLEL */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(f->shared); + assert(f->shared->cache); + assert(type); + assert(H5_addr_defined(addr)); + assert(type->get_initial_load_size); + if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) + assert(type->get_final_load_size); + else + assert(NULL == type->get_final_load_size); + assert(type->deserialize); + + /* Can't see how skip reads could be usefully combined with + * the speculative read flag. Hence disallow.
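+ * (i.e. a cache client class is expected to set at most one of + * H5C__CLASS_SKIP_READS and H5C__CLASS_SPECULATIVE_LOAD_FLAG; the assert + * just below enforces this.)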
+ */ + assert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); + + /* Call the get_initial_load_size callback, to retrieve the initial size of image */ + if (type->get_initial_load_size(udata, &len) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size") + assert(len > 0); + + /* Check for possible speculative read off the end of the file */ + if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) + if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA") + + /* Allocate the buffer for reading the on-disk entry image */ + if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer") +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { + if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") + if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + /* Get the on-disk entry image */ + if (0 == (type->flags & H5C__CLASS_SKIP_READS)) { + unsigned tries, max_tries; /* The # of read attempts */ + unsigned retries; /* The # of retries */ + htri_t chk_ret; /* return from verify_chksum callback */ + size_t actual_len = len; /* The actual length, after speculative reads have been resolved */ + uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */ + void *new_image; /* Pointer to image */ + hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */ + + /* Get the # of read attempts */ + max_tries = tries = H5F_GET_READ_ATTEMPTS(f); + + /* + * This do/while loop performs the following until the metadata checksum + * is correct or the file's number of allowed read attempts is reached.
+ * --read the metadata + * --determine the actual size of the metadata + * --perform checksum verification + */ + do { + if (actual_len != len) { + if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") + image = (uint8_t *)new_image; +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + } /* end if */ + +#ifdef H5_HAVE_PARALLEL + if (!coll_access || 0 == mpi_rank) { +#endif /* H5_HAVE_PARALLEL */ + if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) { +#ifdef H5_HAVE_PARALLEL + if (coll_access) { + /* Push an error, but still participate in following MPI_Bcast */ + memset(image, 0, len); + HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*"); + } + else +#endif + HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") + } + +#ifdef H5_HAVE_PARALLEL + } /* end if */ + /* if the collective metadata read optimization is turned on, + * bcast the metadata read from process 0 to all ranks in the file + * communicator + */ + if (coll_access) { + int buf_size; + + H5_CHECKED_ASSIGN(buf_size, int, len, size_t); + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + /* If the entry could be read speculatively and the length is still + * changing, check for updating the actual size + */ + if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) { + /* Retrieve the actual length */ + actual_len = len; + if (type->get_final_load_size(image, len, udata, &actual_len) < 0) + continue; /* Transfer control to while() and count towards retries */ + + /* Check for the length changing */ + if (actual_len != len) { + /* Verify that the length isn't past the EOA for the file */ + if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA") + + /* Expand buffer to new size */ + if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") + image = (uint8_t *)new_image; +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + if (actual_len > len) { +#ifdef H5_HAVE_PARALLEL + if (!coll_access || 0 == mpi_rank) { +#endif /* H5_HAVE_PARALLEL */ + /* If the thing's image needs to be bigger for a speculatively + * loaded thing, go get the on-disk image again (the extra portion). 
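+ * + * (For example -- purely illustrative numbers -- a speculative first read + * of len = 512 bytes may reveal, via get_final_load_size(), that the object + * actually occupies actual_len = 2048 bytes; only the remaining 1536 bytes + * at addr + len are read here.)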
+ */ + if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < + 0) { +#ifdef H5_HAVE_PARALLEL + if (coll_access) { + /* Push an error, but still participate in following MPI_Bcast */ + memset(image + len, 0, actual_len - len); + HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image"); + } + else +#endif + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") + } + +#ifdef H5_HAVE_PARALLEL + } + /* If the collective metadata read optimization is turned on, + * Bcast the metadata read from process 0 to all ranks in the file + * communicator */ + if (coll_access) { + int buf_size; + + H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t); + if (MPI_SUCCESS != + (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ + } /* end if (actual_len != len) */ + else { + /* The length has stabilized */ + len_changed = FALSE; + + /* Set the final length */ + len = actual_len; + } /* else */ + } /* end if */ + + /* If there's no way to verify the checksum for a piece of metadata + * (usually because there's no checksum in the file), leave now + */ + if (type->verify_chksum == NULL) + break; + + /* Verify the checksum for the metadata image */ + if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback") + if (chk_ret == TRUE) + break; + + /* Sleep for some time */ + H5_nanosleep(nanosec); + nanosec *= 2; /* Double the sleep time next time */ + } while (--tries); + + /* Check for too many tries */ + if (tries == 0) + HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts") + + /* Calculate and track the # of retries */ + retries = max_tries - tries; + if (retries) /* Does not track 0 retry */ + if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries) + + /* Set the final length (in case it wasn't set earlier) */ + len = actual_len; + } /* end if !H5C__CLASS_SKIP_READS */ + + /* Deserialize the on-disk image into the native memory form */ + if (NULL == (thing = type->deserialize(image, len, udata, &dirty))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image") + + entry = (H5C_cache_entry_t *)thing; + + /* In general, an entry should be clean just after it is loaded. + * + * However, when this code is used in the metadata cache, it is + * possible that object headers will be dirty at this point, as + * the deserialize function will alter object headers if necessary to + * fix an old bug. + * + * In the following assert: + * + * assert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); + * + * note that type ids 5 & 6 are associated with object headers in the + * metadata cache. + * + * When we get to using H5C for other purposes, we may wish to + * tighten up the assert so that the loophole only applies to the + * metadata cache. 
+ */ + + assert((dirty == FALSE) || (type->id == 5 || type->id == 6)); + + entry->cache_ptr = f->shared->cache; + entry->addr = addr; + entry->size = len; + assert(entry->size < H5C_MAX_ENTRY_SIZE); + entry->image_ptr = image; + entry->image_up_to_date = !dirty; + entry->type = type; + entry->is_dirty = dirty; + entry->dirtied = FALSE; + entry->is_protected = FALSE; + entry->is_read_only = FALSE; + entry->ro_ref_count = 0; + entry->is_pinned = FALSE; + entry->in_slist = FALSE; + entry->flush_marker = FALSE; +#ifdef H5_HAVE_PARALLEL + entry->clear_on_unprotect = FALSE; + entry->flush_immediately = FALSE; + entry->coll_access = coll_access; +#endif /* H5_HAVE_PARALLEL */ + entry->flush_in_progress = FALSE; + entry->destroy_in_progress = FALSE; + + entry->ring = H5C_RING_UNDEFINED; + + /* Initialize flush dependency fields */ + entry->flush_dep_parent = NULL; + entry->flush_dep_nparents = 0; + entry->flush_dep_parent_nalloc = 0; + entry->flush_dep_nchildren = 0; + entry->flush_dep_ndirty_children = 0; + entry->flush_dep_nunser_children = 0; + entry->ht_next = NULL; + entry->ht_prev = NULL; + entry->il_next = NULL; + entry->il_prev = NULL; + + entry->next = NULL; + entry->prev = NULL; + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + entry->aux_next = NULL; + entry->aux_prev = NULL; +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#ifdef H5_HAVE_PARALLEL + entry->coll_next = NULL; + entry->coll_prev = NULL; +#endif /* H5_HAVE_PARALLEL */ + + /* initialize cache image related fields */ + entry->include_in_image = FALSE; + entry->lru_rank = 0; + entry->image_dirty = FALSE; + entry->fd_parent_count = 0; + entry->fd_parent_addrs = NULL; + entry->fd_child_count = 0; + entry->fd_dirty_child_count = 0; + entry->image_fd_height = 0; + entry->prefetched = FALSE; + entry->prefetch_type_id = 0; + entry->age = 0; + entry->prefetched_dirty = FALSE; +#ifndef NDEBUG /* debugging field */ + entry->serialization_count = 0; +#endif + + /* initialize tag list fields */ + entry->tl_next = NULL; + entry->tl_prev = NULL; + entry->tag_info = NULL; + + H5C__RESET_CACHE_ENTRY_STATS(entry); + + ret_value = thing; + +done: + /* Cleanup on error */ + if (NULL == ret_value) { + /* Release resources */ + if (thing && type->free_icr(thing) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed"); + if (image) + image = (uint8_t *)H5MM_xfree(image); + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__load_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_dirty() + * + * Purpose: Recursively propagate the flush_dep_ndirty_children flag + * up the dependency chain in response to entry either + * becoming dirty or having its flush_dep_ndirty_children + * increased from 0. 
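+ * + * (Despite the "recursively" above, the loop below only touches the + * entry's direct parents -- each parent's flush_dep_ndirty_children count + * is bumped and its notify callback informed; any deeper propagation + * happens through those callbacks.)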
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry) +{ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(entry); + + /* Iterate over the parent entries, if any */ + for (u = 0; u < entry->flush_dep_nparents; u++) { + /* Sanity check */ + assert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < + entry->flush_dep_parent[u]->flush_dep_nchildren); + + /* Adjust the parent's number of dirty children */ + entry->flush_dep_parent[u]->flush_dep_ndirty_children++; + + /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */ + if (entry->flush_dep_parent[u]->type->notify && + (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, + entry->flush_dep_parent[u]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag set") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_dirty() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_clean() + * + * Purpose: Recursively propagate the flush_dep_ndirty_children flag + * up the dependency chain in response to entry either + * becoming clean or having its flush_dep_ndirty_children + * reduced to 0. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry) +{ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(entry); + + /* Iterate over the parent entries, if any */ + /* Note reverse iteration order, in case the callback removes the flush + * dependency - QAK, 2017/08/12 + */ + for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) { + /* Sanity check */ + assert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0); + + /* Adjust the parent's number of dirty children */ + entry->flush_dep_parent[i]->flush_dep_ndirty_children--; + + /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ + if (entry->flush_dep_parent[i]->type->notify && + (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, + entry->flush_dep_parent[i]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag reset") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_clean() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_serialized() + * + * Purpose: Decrement the flush_dep_nunser_children fields of all the + * target entry's flush dependency parents in response to + * the target entry becoming serialized. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr) +{ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(entry_ptr); + + /* Iterate over the parent entries, if any */ + /* Note reverse iteration order, in case the callback removes the flush + * dependency - QAK, 2017/08/12 + */ + for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) { + /* Sanity checks */ + assert(entry_ptr->flush_dep_parent); + assert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0); + + /* decrement the parent's number of unserialized children */ + entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--; + + /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ + if (entry_ptr->flush_dep_parent[i]->type->notify && + (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, + entry_ptr->flush_dep_parent[i]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry serialized flag set") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_serialized() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_unserialized() + * + * Purpose: Increment the flush_dep_nunser_children fields of all the + * target entry's flush dependency parents in response to + * the target entry becoming unserialized. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr) +{ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(entry_ptr); + + /* Iterate over the parent entries, if any */ + for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { + /* Sanity check */ + assert(entry_ptr->flush_dep_parent); + assert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children < + entry_ptr->flush_dep_parent[u]->flush_dep_nchildren); + + /* increment the parent's number of unserialized children */ + entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++; + + /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */ + if (entry_ptr->flush_dep_parent[u]->type->notify && + (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, + entry_ptr->flush_dep_parent[u]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry serialized flag reset") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_unserialized() */ + +#ifndef NDEBUG +/*------------------------------------------------------------------------- + * Function: H5C__assert_flush_dep_nocycle() + * + * Purpose: Assert recursively that base_entry is not the same as + * entry, and perform the same assertion on all of entry's + * flush dependency parents. This is used to detect cycles + * created by flush dependencies.
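+ * + * (Debug builds only: the walk visits every ancestor path, so its cost + * grows with the number of such paths, but it is compiled out entirely + * under NDEBUG.)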
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry) +{ + unsigned u; /* Local index variable */ + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity checks */ + assert(entry); + assert(base_entry); + + /* Make sure the entries are not the same */ + assert(base_entry != entry); + + /* Iterate over entry's parents (if any) */ + for (u = 0; u < entry->flush_dep_nparents; u++) + H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry); + + FUNC_LEAVE_NOAPI_VOID +} /* H5C__assert_flush_dep_nocycle() */ +#endif + +/*------------------------------------------------------------------------- + * Function: H5C__serialize_single_entry + * + * Purpose: Serialize the cache entry pointed to by the entry_ptr + * parameter. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(cache_ptr); + assert(entry_ptr); + assert(!entry_ptr->prefetched); + assert(!entry_ptr->image_up_to_date); + assert(entry_ptr->is_dirty); + assert(!entry_ptr->is_protected); + assert(!entry_ptr->flush_in_progress); + assert(entry_ptr->type); + + /* Set entry_ptr->flush_in_progress to TRUE so the target entry + * will not be evicted out from under us. Must set it back to FALSE + * when we are done. + */ + entry_ptr->flush_in_progress = TRUE; + + /* Allocate buffer for the entry image if required. */ + if (NULL == entry_ptr->image_ptr) { + assert(entry_ptr->size > 0); + if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer") +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + } /* end if */ + + /* Generate image for entry */ + if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry") + + /* Reset the flush_in_progress flag */ + entry_ptr->flush_in_progress = FALSE; + +done: + assert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress)); + assert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date)); + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__serialize_single_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C__destroy_pf_entry_child_flush_deps() + * + * Purpose: Destroy all flush dependencies in which the supplied + * prefetched entry is the parent. Note that the children + * in these flush dependencies must be prefetched entries as + * well. + * + * As this action is part of the process of transferring all + * such flush dependencies to the deserialized version of the + * prefetched entry, ensure that the data necessary to complete + * the transfer is retained. + * + * Note: The current implementation of this function is + * quite inefficient -- mostly due to the current + * implementation of flush dependencies. This should + * be fixed at some point.
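+ * + * (Concretely, the scan below walks the whole index list and, for each + * prefetched entry, searches its flush_dep_parent array linearly -- on the + * order of index_len * fd_parent_count comparisons.)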
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, + H5C_cache_entry_t **fd_children) +{ + H5C_cache_entry_t *entry_ptr; +#ifndef NDEBUG + unsigned entries_visited = 0; +#endif + int fd_children_found = 0; + hbool_t found; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(cache_ptr); + assert(pf_entry_ptr); + assert(pf_entry_ptr->type); + assert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + assert(pf_entry_ptr->prefetched); + assert(pf_entry_ptr->fd_child_count > 0); + assert(fd_children); + + /* Scan each entry on the index list */ + entry_ptr = cache_ptr->il_head; + while (entry_ptr != NULL) { + /* Here we look at entry_ptr->flush_dep_nparents and not + * entry_ptr->fd_parent_count as it is possible that some + * or all of the prefetched flush dependency child relationships + * have already been destroyed. + */ + if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) { + unsigned u; /* Local index variable */ + + /* Re-init */ + u = 0; + found = FALSE; + + /* Sanity checks */ + assert(entry_ptr->type); + assert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + assert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents); + assert(entry_ptr->fd_parent_addrs); + assert(entry_ptr->flush_dep_parent); + + /* Look for correct entry */ + while (!found && (u < entry_ptr->fd_parent_count)) { + /* Sanity check entry */ + assert(entry_ptr->flush_dep_parent[u]); + + /* Correct entry? */ + if (pf_entry_ptr == entry_ptr->flush_dep_parent[u]) + found = TRUE; + + u++; + } /* end while */ + + if (found) { + assert(NULL == fd_children[fd_children_found]); + + /* Remove flush dependency */ + fd_children[fd_children_found] = entry_ptr; + fd_children_found++; + if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "can't destroy pf entry child flush dependency") + +#ifndef NDEBUG + /* Sanity check -- verify that the address of the parent + * appears in entry_ptr->fd_parent_addrs. Must do a search, + * as with flush dependency creates and destroys, + * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent + * can list parents in different order. + */ + found = FALSE; + u = 0; + while (!found && u < entry_ptr->fd_parent_count) { + if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u]) + found = TRUE; + u++; + } /* end while */ + assert(found); +#endif + } /* end if */ + } /* end if */ + +#ifndef NDEBUG + entries_visited++; +#endif + entry_ptr = entry_ptr->il_next; + } /* end while */ + + /* Post-op sanity checks */ + assert(NULL == fd_children[fd_children_found]); + assert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count); + assert(entries_visited == cache_ptr->index_len); + assert(!pf_entry_ptr->is_pinned); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__destroy_pf_entry_child_flush_deps() */ + +/*------------------------------------------------------------------------- + * Function: H5C__deserialize_prefetched_entry() + * + * Purpose: Deserialize the supplied prefetched entry, and return + * a pointer to the deserialized entry in *entry_ptr_ptr. + * If successful, remove the prefetched entry from the cache, + * and free it. Insert the deserialized entry into the cache.
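+ * + * (In outline: destroy the prefetched entry's flush dependencies, + * deserialize its image, swap it out of the index for the new entry, then + * restore the dependencies -- the notes below spell out the details.)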
+ * + * Note that the on disk image of the entry is not freed -- + * a pointer to it is stored in the deserialized entries' + * image_ptr field, and its image_up_to_date field is set to + * TRUE unless the entry is dirtied by the deserialize call. + * + * If the prefetched entry is a flush dependency child, + * destroy that flush dependency prior to calling the + * deserialize callback. If appropriate, the flush dependency + * relationship will be recreated by the cache client. + * + * If the prefetched entry is a flush dependency parent, + * destroy the flush dependency relationship with all its + * children. As all these children must be prefetched entries, + * recreate these flush dependency relationships with the + * deserialized entry after it is inserted in the cache. + * + * Since deserializing a prefetched entry is semantically + * equivalent to a load, issue an entry loaded notification + * if the notify callback is defined. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Note that *entry_ptr_ptr is undefined on failure. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, + const H5C_class_t *type, haddr_t addr, void *udata) +{ + hbool_t dirty = FALSE; /* Flag indicating whether thing was + * dirtied during deserialize + */ + size_t len; /* Size of image in file */ + void *thing = NULL; /* Pointer to thing loaded */ + H5C_cache_entry_t *pf_entry_ptr; /* pointer to the prefetched entry */ + /* supplied in *entry_ptr_ptr. */ + H5C_cache_entry_t *ds_entry_ptr; /* Alias for thing loaded, as cache + * entry + */ + H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically */ + /* allocated array of pointers to */ + /* the flush dependency children of */ + /* the prefetched entry, or NULL if */ + /* that array does not exist. */ + unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* sanity checks */ + assert(f); + assert(f->shared); + assert(f->shared->cache); + assert(f->shared->cache == cache_ptr); + assert(entry_ptr_ptr); + assert(*entry_ptr_ptr); + pf_entry_ptr = *entry_ptr_ptr; + assert(pf_entry_ptr->type); + assert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + assert(pf_entry_ptr->prefetched); + assert(pf_entry_ptr->image_up_to_date); + assert(pf_entry_ptr->image_ptr); + assert(pf_entry_ptr->size > 0); + assert(pf_entry_ptr->addr == addr); + assert(type); + assert(type->id == pf_entry_ptr->prefetch_type_id); + assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); + + /* verify absence of prohibited or unsupported type flag combinations */ + assert(!(type->flags & H5C__CLASS_SKIP_READS)); + + /* Can't see how skip reads could be usefully combined with + * the speculative read flag. Hence disallow. + */ + assert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); + assert(H5_addr_defined(addr)); + assert(type->get_initial_load_size); + assert(type->deserialize); + + /* if *pf_entry_ptr is a flush dependency child, destroy all such + * relationships now. The client will restore the relationship(s) with + * the deserialized entry if appropriate.
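+ * + * (The parent addresses remain available in fd_parent_addrs for the sanity + * checks below; each slot is reset to HADDR_UNDEF as its dependency is + * destroyed.)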
+ */ + assert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents); + for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) { + assert(pf_entry_ptr->flush_dep_parent); + assert(pf_entry_ptr->flush_dep_parent[i]); + assert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0); + assert(pf_entry_ptr->fd_parent_addrs); + assert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]); + + if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency") + + pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF; + } /* end for */ + assert(pf_entry_ptr->flush_dep_nparents == 0); + + /* If *pf_entry_ptr is a flush dependency parent, destroy its flush + * dependency relationships with all its children (which must be + * prefetched entries as well). + * + * These flush dependency relationships will have to be restored + * after the deserialized entry is inserted into the cache in order + * to transfer these relationships to the new entry. Hence save the + * pointers to the flush dependency children of *pf_entry_ptr for later + * use. + */ + if (pf_entry_ptr->fd_child_count > 0) { + if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc( + sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1)))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array") + + if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "can't destroy pf entry child flush dependency(s).") + } /* end if */ + + /* Since the size of the on disk image is known exactly, there is + * no need for either a call to the get_initial_load_size() callback, + * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set. + * Similarly, there is no need to clamp possible reads beyond + * EOF. + */ + len = pf_entry_ptr->size; + + /* Deserialize the prefetched on-disk image of the entry into the + * native memory form + */ + if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image") + ds_entry_ptr = (H5C_cache_entry_t *)thing; + + /* In general, an entry should be clean just after it is loaded. + * + * However, when this code is used in the metadata cache, it is + * possible that object headers will be dirty at this point, as + * the deserialize function will alter object headers if necessary to + * fix an old bug. + * + * In the following assert: + * + * assert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); + * + * note that type ids 5 & 6 are associated with object headers in the + * metadata cache. + * + * When we get to using H5C for other purposes, we may wish to + * tighten up the assert so that the loophole only applies to the + * metadata cache. + * + * Note that at present, dirty can't be set to true with prefetched + * entries. However this may change, so include this functionality + * against that possibility. + * + * Also, note that it is possible for a prefetched entry to be dirty -- + * hence the value assigned to ds_entry_ptr->is_dirty below.
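+ * + * (Specifically, ds_entry_ptr->is_dirty below is the logical OR of the + * deserialize callback's dirty flag and the prefetched entry's own + * is_dirty flag.)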
+ */ + + assert((dirty == FALSE) || (type->id == 5 || type->id == 6)); + + ds_entry_ptr->cache_ptr = f->shared->cache; + ds_entry_ptr->addr = addr; + ds_entry_ptr->size = len; + assert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE); + ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr; + ds_entry_ptr->image_up_to_date = !dirty; + ds_entry_ptr->type = type; + ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty; + ds_entry_ptr->dirtied = FALSE; + ds_entry_ptr->is_protected = FALSE; + ds_entry_ptr->is_read_only = FALSE; + ds_entry_ptr->ro_ref_count = 0; + ds_entry_ptr->is_pinned = FALSE; + ds_entry_ptr->in_slist = FALSE; + ds_entry_ptr->flush_marker = FALSE; +#ifdef H5_HAVE_PARALLEL + ds_entry_ptr->clear_on_unprotect = FALSE; + ds_entry_ptr->flush_immediately = FALSE; + ds_entry_ptr->coll_access = FALSE; +#endif /* H5_HAVE_PARALLEL */ + ds_entry_ptr->flush_in_progress = FALSE; + ds_entry_ptr->destroy_in_progress = FALSE; + + ds_entry_ptr->ring = pf_entry_ptr->ring; + + /* Initialize flush dependency height fields */ + ds_entry_ptr->flush_dep_parent = NULL; + ds_entry_ptr->flush_dep_nparents = 0; + ds_entry_ptr->flush_dep_parent_nalloc = 0; + ds_entry_ptr->flush_dep_nchildren = 0; + ds_entry_ptr->flush_dep_ndirty_children = 0; + ds_entry_ptr->flush_dep_nunser_children = 0; + + /* Initialize fields supporting the hash table: */ + ds_entry_ptr->ht_next = NULL; + ds_entry_ptr->ht_prev = NULL; + ds_entry_ptr->il_next = NULL; + ds_entry_ptr->il_prev = NULL; + + /* Initialize fields supporting replacement policies: */ + ds_entry_ptr->next = NULL; + ds_entry_ptr->prev = NULL; +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + ds_entry_ptr->aux_next = NULL; + ds_entry_ptr->aux_prev = NULL; +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ +#ifdef H5_HAVE_PARALLEL + ds_entry_ptr->coll_next = NULL; + ds_entry_ptr->coll_prev = NULL; +#endif /* H5_HAVE_PARALLEL */ + + /* Initialize cache image related fields */ + ds_entry_ptr->include_in_image = FALSE; + ds_entry_ptr->lru_rank = 0; + ds_entry_ptr->image_dirty = FALSE; + ds_entry_ptr->fd_parent_count = 0; + ds_entry_ptr->fd_parent_addrs = NULL; + ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count; + ds_entry_ptr->fd_dirty_child_count = 0; + ds_entry_ptr->image_fd_height = 0; + ds_entry_ptr->prefetched = FALSE; + ds_entry_ptr->prefetch_type_id = 0; + ds_entry_ptr->age = 0; + ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty; +#ifndef NDEBUG /* debugging field */ + ds_entry_ptr->serialization_count = 0; +#endif + + H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr); + + /* Apply tag to the newly deserialized entry */ + if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") + + /* We have successfully deserialized the prefetched entry. + * + * Before we return a pointer to the deserialized entry, we must remove + * the prefetched entry from the cache, discard it, and replace it with + * the deserialized entry. Note that we do not free the prefetched + * entry's image, as that has been transferred to the deserialized + * entry. + * + * Also note that we have not yet restored any flush dependencies. This + * must wait until the deserialized entry is inserted in the cache. + * + * To delete the prefetched entry from the cache: + * + * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already + * transferred the buffer containing the image to *ds_entry_ptr, + * this is not a memory leak.
+ * + * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG + * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set. + */ + pf_entry_ptr->image_ptr = NULL; + + if (pf_entry_ptr->is_dirty) { + assert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) || + ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist))); + + flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; + } /* end if */ + + if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry") + +#ifndef NDEBUG /* verify deletion */ + H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL); + + assert(NULL == pf_entry_ptr); +#endif + + /* Insert the deserialized entry into the cache. */ + H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL); + + assert(!ds_entry_ptr->in_slist); + if (ds_entry_ptr->is_dirty) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL); + + H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL); + + /* Deserializing a prefetched entry is the conceptual equivalent of + * loading it from file. If the deserialized entry has a notify callback, + * send an "after load" notice now that the deserialized entry is fully + * integrated into the cache. + */ + if (ds_entry_ptr->type->notify && + (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache") + + /* Restore flush dependencies with the flush dependency children + * of the prefetched entry. Note that we must protect *ds_entry_ptr + * before the call to avoid triggering sanity check failures, and + * then unprotect it afterwards. + */ + i = 0; + if (fd_children != NULL) { + H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL); + ds_entry_ptr->is_protected = TRUE; + while (fd_children[i] != NULL) { + /* Sanity checks */ + assert((fd_children[i])->prefetched); + assert((fd_children[i])->fd_parent_count > 0); + assert((fd_children[i])->fd_parent_addrs); + +#ifndef NDEBUG + { + int j; + hbool_t found; + + j = 0; + found = FALSE; + while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) { + if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr) + found = TRUE; + + j++; + } /* end while */ + assert(found); + } +#endif + + if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency") + + i++; + } /* end while */ + + H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL); + ds_entry_ptr->is_protected = FALSE; + } /* end if ( fd_children != NULL ) */ + assert((unsigned)i == ds_entry_ptr->fd_child_count); + + ds_entry_ptr->fd_child_count = 0; + H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr); + + /* finally, pass ds_entry_ptr back to the caller */ + *entry_ptr_ptr = ds_entry_ptr; + +done: + if (fd_children) + fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children); + + /* Release resources on error */ + if (FAIL == ret_value) + if (thing && type->free_icr(thing) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed"); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__deserialize_prefetched_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_insert_entry + * + * Purpose: Adds the specified thing to the cache. The thing need not + * exist on disk yet, but it must have an address and disk + * space reserved.
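+ * + * (Rough usage sketch, for illustration only -- in the library proper + * clients normally go through the H5AC layer rather than calling this + * directly: + * + * haddr_t addr = H5MF_alloc(f, H5FD_MEM_OHDR, (hsize_t)size); + * if (H5_addr_defined(addr)) + * status = H5C_insert_entry(f, type, addr, thing, H5C__NO_FLAGS_SET); + * + * the entry enters the cache dirty and unprotected.)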
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags) +{ + H5C_t *cache_ptr; + H5AC_ring_t ring = H5C_RING_UNDEFINED; + hbool_t insert_pinned; + hbool_t flush_last; +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ +#endif /* H5_HAVE_PARALLEL */ + hbool_t set_flush_marker; + hbool_t write_permitted = TRUE; + size_t empty_space; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *test_entry_ptr; + hbool_t entry_tagged = FALSE; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + assert(f); + assert(f->shared); + + cache_ptr = f->shared->cache; + + assert(cache_ptr); + assert(type); + assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); + assert(type->image_len); + assert(H5_addr_defined(addr)); + assert(thing); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + /* no need to verify that entry is not already in the index as */ + /* we already make that check below. */ + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); + insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0); + flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); + + /* Get the ring type from the API context */ + ring = H5CX_get_ring(); + + entry_ptr = (H5C_cache_entry_t *)thing; + + /* verify that the new entry isn't already in the hash table -- scream + * and die if it is. 
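+ * + * (H5C__SEARCH_INDEX below hashes addr; a hit equal to entry_ptr means the + * same thing is being inserted twice, while any other hit means a distinct + * entry already owns that address -- both are fatal.)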
+ */ + + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL); + + if (test_entry_ptr != NULL) { + if (test_entry_ptr == entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache") + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache") + } /* end if */ + + entry_ptr->cache_ptr = cache_ptr; + entry_ptr->addr = addr; + entry_ptr->type = type; + + entry_ptr->image_ptr = NULL; + entry_ptr->image_up_to_date = FALSE; + + entry_ptr->is_protected = FALSE; + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + + entry_ptr->is_pinned = insert_pinned; + entry_ptr->pinned_from_client = insert_pinned; + entry_ptr->pinned_from_cache = FALSE; + entry_ptr->flush_me_last = flush_last; + + /* newly inserted entries are assumed to be dirty */ + entry_ptr->is_dirty = TRUE; + + /* not protected, so can't be dirtied */ + entry_ptr->dirtied = FALSE; + + /* Retrieve the size of the thing */ + if ((type->image_len)(thing, &(entry_ptr->size)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing") + assert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE); + + entry_ptr->in_slist = FALSE; + +#ifdef H5_HAVE_PARALLEL + entry_ptr->clear_on_unprotect = FALSE; + entry_ptr->flush_immediately = FALSE; +#endif /* H5_HAVE_PARALLEL */ + + entry_ptr->flush_in_progress = FALSE; + entry_ptr->destroy_in_progress = FALSE; + + entry_ptr->ring = ring; + + /* Initialize flush dependency fields */ + entry_ptr->flush_dep_parent = NULL; + entry_ptr->flush_dep_nparents = 0; + entry_ptr->flush_dep_parent_nalloc = 0; + entry_ptr->flush_dep_nchildren = 0; + entry_ptr->flush_dep_ndirty_children = 0; + entry_ptr->flush_dep_nunser_children = 0; + + entry_ptr->ht_next = NULL; + entry_ptr->ht_prev = NULL; + entry_ptr->il_next = NULL; + entry_ptr->il_prev = NULL; + + entry_ptr->next = NULL; + entry_ptr->prev = NULL; + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + entry_ptr->aux_next = NULL; + entry_ptr->aux_prev = NULL; +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#ifdef H5_HAVE_PARALLEL + entry_ptr->coll_next = NULL; + entry_ptr->coll_prev = NULL; +#endif /* H5_HAVE_PARALLEL */ + + /* initialize cache image related fields */ + entry_ptr->include_in_image = FALSE; + entry_ptr->lru_rank = 0; + entry_ptr->image_dirty = FALSE; + entry_ptr->fd_parent_count = 0; + entry_ptr->fd_parent_addrs = NULL; + entry_ptr->fd_child_count = 0; + entry_ptr->fd_dirty_child_count = 0; + entry_ptr->image_fd_height = 0; + entry_ptr->prefetched = FALSE; + entry_ptr->prefetch_type_id = 0; + entry_ptr->age = 0; + entry_ptr->prefetched_dirty = FALSE; +#ifndef NDEBUG /* debugging field */ + entry_ptr->serialization_count = 0; +#endif + + /* initialize tag list fields */ + entry_ptr->tl_next = NULL; + entry_ptr->tl_prev = NULL; + entry_ptr->tag_info = NULL; + + /* Apply tag to newly inserted entry */ + if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") + entry_tagged = TRUE; + + H5C__RESET_CACHE_ENTRY_STATS(entry_ptr); + + if (cache_ptr->flash_size_increase_possible && + (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) + if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed") + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + if (cache_ptr->evictions_enabled && + 
(((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
+         (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
+        size_t space_needed;
+
+        if (empty_space <= entry_ptr->size)
+            cache_ptr->cache_full = TRUE;
+
+        if (cache_ptr->check_write_permitted != NULL) {
+            if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
+        } /* end if */
+        else
+            write_permitted = cache_ptr->write_permitted;
+
+        assert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
+        space_needed = entry_ptr->size;
+        if (space_needed > cache_ptr->max_cache_size)
+            space_needed = cache_ptr->max_cache_size;
+
+        /* Note that space_needed is just the amount of space needed
+         * to insert the new entry without exceeding the cache size
+         * limit.  The subsequent call to H5C__make_space_in_cache()
+         * may evict the entries required to free more or less space
+         * depending on conditions.  It MAY be less if the cache is
+         * currently undersized, or more if the cache is oversized.
+         *
+         * The cache can exceed its maximum size limit via the following
+         * mechanisms:
+         *
+         * First, it is possible for the cache to grow without
+         * bound as long as entries are protected and not unprotected.
+         *
+         * Second, when writes are not permitted it is also possible
+         * for the cache to grow without bound.
+         *
+         * Finally, we usually don't check to see if the cache is
+         * oversized at the end of an unprotect.  As a result, it is
+         * possible to have a vastly oversized cache with no protected
+         * entries as long as all the protects precede the unprotects.
+         */
+
+        if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
+    } /* end if */
+
+    H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
+
+    /* New entries are presumed to be dirty */
+    assert(entry_ptr->is_dirty);
+    entry_ptr->flush_marker = set_flush_marker;
+    H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+    H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    /* If the entry's type has a 'notify' callback send an 'after insertion'
+     * notice now that the entry is fully integrated into the cache.
+     */
+    if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
+
+    H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr);
+
+#ifdef H5_HAVE_PARALLEL
+    if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
+        coll_access = H5F_get_coll_metadata_reads(f);
+
+    entry_ptr->coll_access = coll_access;
+    if (coll_access) {
+        H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL);
+
+        /* Make sure the size of the collective entries in the cache remains in check */
+        if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+            if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
+                if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+            } /* end if */
+        } /* end if */
+        else {
+            if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
+                if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+            } /* end if */
+        } /* end else */
+    } /* end if */
+#endif
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
+        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit");
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    if (ret_value < 0 && entry_tagged)
+        if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+            HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list");
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_insert_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C_mark_entry_dirty
+ *
+ * Purpose:     Mark a pinned or protected entry as dirty.  The target
+ *              entry MUST be either pinned or protected, and MAY be both.
+ *
+ *              In the protected case, this call is the functional
+ *              equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
+ *              call.
+ *
+ *              In the pinned but not protected case, if the entry is not
+ *              already dirty, the function marks the entry dirty and
+ *              places it on the skip list.
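+ *
+ *              For example (a minimal sketch; 'thing' is assumed to be
+ *              a pinned entry the client has just modified in place):
+ *
+ *                  if (H5C_mark_entry_dirty(thing) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark entry dirty")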
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_mark_entry_dirty(void *thing)
+{
+    H5C_t             *cache_ptr;
+    H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
+    herr_t             ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    assert(entry_ptr);
+    assert(H5_addr_defined(entry_ptr->addr));
+    cache_ptr = entry_ptr->cache_ptr;
+    assert(cache_ptr);
+
+    if (entry_ptr->is_protected) {
+        assert(!((entry_ptr)->is_read_only));
+
+        /* set the dirtied flag */
+        entry_ptr->dirtied = TRUE;
+
+        /* reset image_up_to_date */
+        if (entry_ptr->image_up_to_date) {
+            entry_ptr->image_up_to_date = FALSE;
+
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                                "Can't propagate serialization status to fd parents")
+        } /* end if */
+    } /* end if */
+    else if (entry_ptr->is_pinned) {
+        hbool_t was_clean; /* Whether the entry was previously clean */
+        hbool_t image_was_up_to_date;
+
+        /* Remember previous dirty status */
+        was_clean = !entry_ptr->is_dirty;
+
+        /* Check if image is up to date */
+        image_was_up_to_date = entry_ptr->image_up_to_date;
+
+        /* Mark the entry as dirty if it isn't already */
+        entry_ptr->is_dirty         = TRUE;
+        entry_ptr->image_up_to_date = FALSE;
+
+        /* Modify cache data structures */
+        if (was_clean)
+            H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL);
+        if (!entry_ptr->in_slist)
+            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+
+        /* Update stats for entry being marked dirty */
+        H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr);
+
+        /* Check for entry changing status and do notifications, etc. */
+        if (was_clean) {
+            /* If the entry's type has a 'notify' callback send an 'entry dirtied'
+             * notice now that the entry is fully integrated into the cache.
+             */
+            if (entry_ptr->type->notify &&
+                (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+
+            /* Propagate the dirty flag up the flush dependency chain if appropriate */
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+        } /* end if */
+        if (image_was_up_to_date)
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                                "Can't propagate serialization status to fd parents")
+    } /* end if */
+    else
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_dirty() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C_mark_entry_clean
+ *
+ * Purpose:     Mark a pinned entry as clean.  The target entry MUST be
+ *              pinned.
+ *
+ *              If the entry is not already clean, the function marks it
+ *              clean and removes it from the skip list.
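+ *
+ *              For example (a minimal sketch; 'thing' is assumed to be a
+ *              pinned entry whose on-disk image is known to match its
+ *              in-core state):
+ *
+ *                  if (H5C_mark_entry_clean(thing) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't mark entry clean")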
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_clean(void *_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry_ptr); + assert(H5_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + assert(cache_ptr); + + /* Operate on pinned entry */ + if (entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected") + else if (entry_ptr->is_pinned) { + hbool_t was_dirty; /* Whether the entry was previously dirty */ + + /* Remember previous dirty status */ + was_dirty = entry_ptr->is_dirty; + + /* Mark the entry as clean if it isn't already */ + entry_ptr->is_dirty = FALSE; + + /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */ + entry_ptr->flush_marker = FALSE; + + /* Modify cache data structures */ + if (was_dirty) + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); + if (entry_ptr->in_slist) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL); + + /* Update stats for entry being marked clean */ + H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr); + + /* Check for entry changing status and do notifications, etc. */ + if (was_dirty) { + /* If the entry's type has a 'notify' callback send a 'entry cleaned' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + + /* Propagate the clean up the flush dependency chain, if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_clean() */ + +/*------------------------------------------------------------------------- + * Function: H5C_mark_entry_unserialized + * + * Purpose: Mark a pinned or protected entry as unserialized. The target + * entry MUST be either pinned or protected, and MAY be both. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_unserialized(void *thing) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry); + assert(H5_addr_defined(entry->addr)); + + if (entry->is_protected || entry->is_pinned) { + assert(!entry->is_read_only); + + /* Reset image_up_to_date */ + if (entry->image_up_to_date) { + entry->image_up_to_date = FALSE; + + if (entry->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, + "Entry to unserialize is neither pinned nor protected??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_unserialized() */ + +/*------------------------------------------------------------------------- + * Function: H5C_mark_entry_serialized + * + * Purpose: Mark a pinned entry as serialized. The target entry MUST be + * pinned. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_serialized(void *_thing) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry); + assert(H5_addr_defined(entry->addr)); + + /* Operate on pinned entry */ + if (entry->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected") + else if (entry->is_pinned) { + /* Check for entry changing status and do notifications, etc. */ + if (!entry->image_up_to_date) { + /* Set the image_up_to_date flag */ + entry->image_up_to_date = TRUE; + + /* Propagate the serialize up the flush dependency chain, if appropriate */ + if (entry->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_serialized(entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, + "Can't propagate flush dep serialize") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_serialized() */ + +/*------------------------------------------------------------------------- + * Function: H5C_move_entry + * + * Purpose: Use this function to notify the cache that an entry's + * file address changed. 
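+ *
+ *              For example (a minimal sketch; the caller is assumed to
+ *              have already relocated the entry's image from 'old_addr'
+ *              to 'new_addr' in the file):
+ *
+ *                  if (H5C_move_entry(cache_ptr, type, old_addr, new_addr) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move entry")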
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr) +{ + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *test_entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + assert(cache_ptr); + assert(type); + assert(H5_addr_defined(old_addr)); + assert(H5_addr_defined(new_addr)); + assert(H5_addr_ne(old_addr, new_addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL); + + if (entry_ptr == NULL || entry_ptr->type != type) + /* the old item doesn't exist in the cache, so we are done. */ + HGOTO_DONE(SUCCEED); + + assert(entry_ptr->addr == old_addr); + assert(entry_ptr->type == type); + + /* Check for R/W status, otherwise error */ + /* (Moving a R/O entry would mark it dirty, which shouldn't + * happen. QAK - 2016/12/02) + */ + if (entry_ptr->is_read_only) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry") + + H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL); + + if (test_entry_ptr != NULL) { /* we are hosed */ + if (test_entry_ptr->type == type) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???") + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?") + } /* end if */ + + /* If we get this far we have work to do. Remove *entry_ptr from + * the hash table (and skip list if necessary), change its address to the + * new address, mark it as dirty (if it isn't already) and then re-insert. + * + * Update the replacement policy for a hit to avoid an eviction before + * the moved entry is touched. Update stats for a move. + * + * Note that we do not check the size of the cache, or evict anything. + * Since this is a simple re-name, cache size should be unaffected. + * + * Check to see if the target entry is in the process of being destroyed + * before we delete from the index, etc. If it is, all we do is + * change the addr. If the entry is only in the process of being flushed, + * don't mark it as dirty either, lest we confuse the flush call back. 
+ */ + if (!entry_ptr->destroy_in_progress) { + H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); + + if (entry_ptr->in_slist) { + assert(cache_ptr->slist_ptr); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL); + } /* end if */ + } /* end if */ + + entry_ptr->addr = new_addr; + + if (!entry_ptr->destroy_in_progress) { + hbool_t was_dirty; /* Whether the entry was previously dirty */ + + /* Remember previous dirty status */ + was_dirty = entry_ptr->is_dirty; + + /* Mark the entry as dirty if it isn't already */ + entry_ptr->is_dirty = TRUE; + + /* This shouldn't be needed, but it keeps the test code happy */ + if (entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Modify cache data structures */ + H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL); + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); + + /* Skip some actions if we're in the middle of flushing the entry */ + if (!entry_ptr->flush_in_progress) { + /* Update the replacement policy for the entry */ + H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL); + + /* Check for entry changing status and do notifications, etc. */ + if (!was_dirty) { + /* If the entry's type has a 'notify' callback send a 'entry dirtied' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag set") + + /* Propagate the dirty flag up the flush dependency chain if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, + "Can't propagate flush dep dirty flag") + } /* end if */ + } /* end if */ + } /* end if */ + + H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr); + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_move_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_resize_entry + * + * Purpose: Resize a pinned or protected entry. + * + * Resizing an entry dirties it, so if the entry is not + * already dirty, the function places the entry on the + * skip list. 
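+ *
+ *              For example (a minimal sketch; 'thing' is assumed to be a
+ *              pinned or protected entry whose image is growing to
+ *              'new_size' bytes):
+ *
+ *                  if (H5C_resize_entry(thing, new_size) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")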
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_resize_entry(void *thing, size_t new_size) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry_ptr); + assert(H5_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + assert(cache_ptr); + + /* Check for usage errors */ + if (new_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive") + if (!(entry_ptr->is_pinned || entry_ptr->is_protected)) + HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??") + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* update for change in entry size if necessary */ + if (entry_ptr->size != new_size) { + hbool_t was_clean; + + /* make note of whether the entry was clean to begin with */ + was_clean = !entry_ptr->is_dirty; + + /* mark the entry as dirty if it isn't already */ + entry_ptr->is_dirty = TRUE; + + /* Reset the image up-to-date status */ + if (entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Release the current image */ + if (entry_ptr->image_ptr) + entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); + + /* do a flash cache size increase if appropriate */ + if (cache_ptr->flash_size_increase_possible) { + if (new_size > entry_ptr->size) { + size_t size_increase; + + size_increase = new_size - entry_ptr->size; + if (size_increase >= cache_ptr->flash_size_increase_threshold) + if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed") + } + } + + /* update the pinned and/or protected entry list */ + if (entry_ptr->is_pinned) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size, + new_size, FAIL) + if (entry_ptr->is_protected) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size, + FAIL) + +#ifdef H5_HAVE_PARALLEL + if (entry_ptr->coll_access) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size, + entry_ptr->size, new_size, FAIL) +#endif /* H5_HAVE_PARALLEL */ + + /* update statistics just before changing the entry size */ + H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); + + /* update the hash table */ + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL); + + /* if the entry is in the skip list, update that too */ + if (entry_ptr->in_slist) + H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size); + + /* finally, update the entry size proper */ + entry_ptr->size = new_size; + + if (!entry_ptr->in_slist) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); + + if (entry_ptr->is_pinned) + H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr); + + /* Check for entry changing status and do notifications, etc. 
*/ + if (was_clean) { + /* If the entry's type has a 'notify' callback send a 'entry dirtied' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") + + /* Propagate the dirty flag up the flush dependency chain if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") + } /* end if */ + } /* end if */ + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_resize_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_pin_protected_entry() + * + * Purpose: Pin a protected cache entry. The entry must be protected + * at the time of call, and must be unpinned. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_pin_protected_entry(void *thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry_ptr); + assert(H5_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + assert(cache_ptr); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Only protected entries can be pinned */ + if (!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected") + + /* Pin the entry from a client */ + if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_pin_protected_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_protect + * + * Purpose: If the target entry is not in the cache, load it. If + * necessary, attempt to evict one or more entries to keep + * the cache within its maximum size. + * + * Mark the target entry as protected, and return its address + * to the caller. The caller must call H5C_unprotect() when + * finished with the entry. + * + * While it is protected, the entry may not be either evicted + * or flushed -- nor may it be accessed by another call to + * H5C_protect. Any attempt to do so will result in a failure. 
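+ *
+ *              Typical usage pairs this call with H5C_unprotect() (a
+ *              minimal sketch; 'udata' is whatever the entry type's
+ *              deserialize callback expects):
+ *
+ *                  if (NULL == (thing = H5C_protect(f, type, addr, udata, H5C__READ_ONLY_FLAG)))
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "can't protect entry")
+ *                  ... examine 'thing' ...
+ *                  if (H5C_unprotect(f, addr, thing, H5C__NO_FLAGS_SET) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, NULL, "can't unprotect entry")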
+ * + * Return: Success: Ptr to the desired entry + * Failure: NULL + * + *------------------------------------------------------------------------- + */ +void * +H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags) +{ + H5C_t *cache_ptr; + H5AC_ring_t ring = H5C_RING_UNDEFINED; + hbool_t hit; + hbool_t have_write_permitted = FALSE; + hbool_t read_only = FALSE; + hbool_t flush_last; +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ +#endif /* H5_HAVE_PARALLEL */ + hbool_t write_permitted = FALSE; + hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */ + size_t empty_space; + void *thing; + H5C_cache_entry_t *entry_ptr; + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_NOAPI(NULL) + + /* check args */ + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(type); + assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); + assert(H5_addr_defined(addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Load the cache image, if requested */ + if (cache_ptr->load_image) { + cache_ptr->load_image = FALSE; + if (H5C__load_cache_image(f) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image") + } /* end if */ + + read_only = ((flags & H5C__READ_ONLY_FLAG) != 0); + flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); + + /* Get the ring type from the API context */ + ring = H5CX_get_ring(); + +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) + coll_access = H5F_get_coll_metadata_reads(f); +#endif /* H5_HAVE_PARALLEL */ + + /* first check to see if the target is in cache */ + H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL); + + if (entry_ptr != NULL) { + if (entry_ptr->ring != ring) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") + + if (entry_ptr->prefetched) { + /* This call removes the prefetched entry from the cache, + * and replaces it with an entry deserialized from the + * image of the prefetched entry. + */ + if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry") + + assert(!entry_ptr->prefetched); + assert(entry_ptr->addr == addr); + } /* end if */ + + /* Check for trying to load the wrong type of entry from an address */ + if (entry_ptr->type != type) + HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") + +#ifdef H5_HAVE_PARALLEL + /* If this is a collective metadata read, the entry is not marked as + * collective, and is clean, it is possible that other processes will + * not have it in its cache and will expect a bcast of the entry from + * process 0. So process 0 will bcast the entry to all other ranks. + * Ranks that _do_ have the entry in their cache still have to + * participate in the bcast. 
+ */ + if (coll_access) { + if (!entry_ptr->is_dirty && !entry_ptr->coll_access) { + MPI_Comm comm; /* File MPI Communicator */ + int mpi_code; /* MPI error code */ + int buf_size; + + if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f))) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") + + if (entry_ptr->image_ptr == NULL) { + int mpi_rank; + + if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") + + if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, + "memory allocation failed for on disk image buffer") +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + /* If image generation fails, push an error but + * still participate in the following MPI_Bcast + */ + HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image"); + } /* end if */ + assert(entry_ptr->image_ptr); + + H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t); + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + + /* Mark the entry as collective and insert into the collective list */ + entry_ptr->coll_access = TRUE; + H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL); + } /* end if */ + else if (entry_ptr->coll_access) + H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL); + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + +#ifdef H5C_DO_TAGGING_SANITY_CHECKS + { + /* Verify tag value */ + if (cache_ptr->ignore_tags != TRUE) { + haddr_t tag; /* Tag value */ + + /* The entry is already in the cache, but make sure that the tag value + * is still legal. This will ensure that had the entry NOT been in the + * cache, tagging was still set up correctly and it would have received + * a legal tag value after getting loaded from disk. + */ + + /* Get the tag */ + tag = H5CX_get_tag(); + + if (H5C_verify_tag(entry_ptr->type->id, tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed") + } /* end if */ + } +#endif + + hit = TRUE; + thing = (void *)entry_ptr; + } + else { + /* must try to load the entry from disk. */ + hit = FALSE; + if (NULL == (thing = H5C__load_entry(f, +#ifdef H5_HAVE_PARALLEL + coll_access, +#endif /* H5_HAVE_PARALLEL */ + type, addr, udata))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry") + + entry_ptr = (H5C_cache_entry_t *)thing; + cache_ptr->entries_loaded_counter++; + + entry_ptr->ring = ring; +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access) + H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL); +#endif /* H5_HAVE_PARALLEL */ + + /* Apply tag to newly protected entry */ + if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry") + + /* If the entry is very large, and we are configured to allow it, + * we may wish to perform a flash cache size increase. 
+     */
+    if (cache_ptr->flash_size_increase_possible &&
+        (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
+        if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
+
+    if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+        empty_space = 0;
+    else
+        empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+    /* try to free up space if necessary and if evictions are permitted.  Note
+     * that if evictions are enabled, we will call H5C__make_space_in_cache()
+     * regardless of whether the min_free_space requirement is met.
+     */
+    if (cache_ptr->evictions_enabled &&
+        (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
+         ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
+
+        size_t space_needed;
+
+        if (empty_space <= entry_ptr->size)
+            cache_ptr->cache_full = TRUE;
+
+        if (cache_ptr->check_write_permitted != NULL) {
+            if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+            else
+                have_write_permitted = TRUE;
+        } /* end if */
+        else {
+            write_permitted      = cache_ptr->write_permitted;
+            have_write_permitted = TRUE;
+        } /* end else */
+
+        assert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
+        space_needed = entry_ptr->size;
+        if (space_needed > cache_ptr->max_cache_size)
+            space_needed = cache_ptr->max_cache_size;
+
+        /* Note that space_needed is just the amount of space needed
+         * to insert the new entry without exceeding the cache size
+         * limit.  The subsequent call to H5C__make_space_in_cache()
+         * may evict the entries required to free more or less space
+         * depending on conditions.  It MAY be less if the cache is
+         * currently undersized, or more if the cache is oversized.
+         *
+         * The cache can exceed its maximum size limit via the following
+         * mechanisms:
+         *
+         * First, it is possible for the cache to grow without
+         * bound as long as entries are protected and not unprotected.
+         *
+         * Second, when writes are not permitted it is also possible
+         * for the cache to grow without bound.
+         *
+         * Third, the user may choose to disable evictions -- causing
+         * the cache to grow without bound until evictions are
+         * re-enabled.
+         *
+         * Finally, we usually don't check to see if the cache is
+         * oversized at the end of an unprotect.  As a result, it is
+         * possible to have a vastly oversized cache with no protected
+         * entries as long as all the protects precede the unprotects.
+         */
+        if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+    } /* end if */
+
+    /* Insert the entry in the hash table.
+     *
+     * *******************************************
+     *
+     * Set the flush_me_last field
+     * of the newly loaded entry before inserting it into the
+     * index.  Must do this, as the index tracks the number of
+     * entries with the flush_last field set, but assumes that
+     * the field will not change after insertion into the index.
+     *
+     * Note that this means that the H5C__FLUSH_LAST_FLAG flag
+     * is ignored if the entry is already in cache.
+     */
+    entry_ptr->flush_me_last = flush_last;
+
+    H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL);
+    if (entry_ptr->is_dirty && !entry_ptr->in_slist)
+        H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL);
+
+    /* insert the entry in the data structures used by the replacement
+     * policy.
We are just going to take it out again when we update + * the replacement policy for a protect, but this simplifies the + * code. If we do this often enough, we may want to optimize this. + */ + H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL); + + /* Record that the entry was loaded, to trigger a notify callback later */ + /* (After the entry is fully added to the cache) */ + was_loaded = TRUE; + } /* end else */ + + assert(entry_ptr->addr == addr); + assert(entry_ptr->type == type); + + if (entry_ptr->is_protected) { + if (read_only && entry_ptr->is_read_only) { + assert(entry_ptr->ro_ref_count > 0); + (entry_ptr->ro_ref_count)++; + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?") + } /* end if */ + else { + H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL); + + entry_ptr->is_protected = TRUE; + if (read_only) { + entry_ptr->is_read_only = TRUE; + entry_ptr->ro_ref_count = 1; + } /* end if */ + entry_ptr->dirtied = FALSE; + } /* end else */ + + H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit); + H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit); + + ret_value = thing; + + if (cache_ptr->evictions_enabled && + (cache_ptr->size_decreased || + (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) { + + if (!have_write_permitted) { + if (cache_ptr->check_write_permitted != NULL) { + if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted") + else + have_write_permitted = TRUE; + } + else { + write_permitted = cache_ptr->write_permitted; + have_write_permitted = TRUE; + } + } + + if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)) + if (H5C__auto_adjust_cache_size(f, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed") + + if (cache_ptr->size_decreased) { + cache_ptr->size_decreased = FALSE; + + /* check to see if the cache is now oversized due to the cache + * size reduction. If it is, try to evict enough entries to + * bring the cache size down to the current maximum cache size. + * + * Also, if the min_clean_size requirement is not met, we + * should also call H5C__make_space_in_cache() to bring us + * into compliance. + */ + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + if ((cache_ptr->index_size > cache_ptr->max_cache_size) || + ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) { + + if (cache_ptr->index_size > cache_ptr->max_cache_size) + cache_ptr->cache_full = TRUE; + + if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") + } + } /* end if */ + } + + /* If we loaded the entry and the entry's type has a 'notify' callback, send + * an 'after load' notice now that the entry is fully integrated into + * the cache and protected. We must wait until it is protected so it is not + * evicted during the notify callback. + */ + if (was_loaded) + /* If the entry's type has a 'notify' callback send a 'after load' + * notice now that the entry is fully integrated into the cache. 
+ */ + if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, + "can't notify client about entry inserted into cache") + +#ifdef H5_HAVE_PARALLEL + /* Make sure the size of the collective entries in the cache remain in check */ + if (coll_access) { + if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { + if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) + if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") + } /* end if */ + else { + if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) + if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") + } /* end else */ + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_protect() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unpin_entry() + * + * Purpose: Unpin a cache entry. The entry can be either protected or + * unprotected at the time of call, but must be pinned. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unpin_entry(void *_entry_ptr) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + assert(entry_ptr); + cache_ptr = entry_ptr->cache_ptr; + assert(cache_ptr); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Unpin the entry */ + if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unpin_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unprotect + * + * Purpose: Undo an H5C_protect() call -- specifically, mark the + * entry as unprotected, remove it from the protected list, + * and give it back to the replacement policy. + * + * The TYPE and ADDR arguments must be the same as those in + * the corresponding call to H5C_protect() and the THING + * argument must be the value returned by that call to + * H5C_protect(). 
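+ *
+ *              For example, a caller that modified the entry while it
+ *              was protected would pass the H5C__DIRTIED_FLAG (a minimal
+ *              sketch):
+ *
+ *                  if (H5C_unprotect(f, addr, thing, H5C__DIRTIED_FLAG) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")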
+ *
+ *              If the deleted flag is TRUE, simply remove the target entry
+ *              from the cache, clear it, and free it without writing it to
+ *              disk.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
+{
+    H5C_t  *cache_ptr;
+    hbool_t deleted;
+    hbool_t dirtied;
+    hbool_t set_flush_marker;
+    hbool_t pin_entry;
+    hbool_t unpin_entry;
+    hbool_t free_file_space;
+    hbool_t take_ownership;
+    hbool_t was_clean;
+#ifdef H5_HAVE_PARALLEL
+    hbool_t clear_entry = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+    H5C_cache_entry_t *entry_ptr;
+    H5C_cache_entry_t *test_entry_ptr;
+    herr_t             ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    deleted          = ((flags & H5C__DELETED_FLAG) != 0);
+    dirtied          = ((flags & H5C__DIRTIED_FLAG) != 0);
+    set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
+    pin_entry        = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+    unpin_entry      = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
+    free_file_space  = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+    take_ownership   = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+
+    assert(f);
+    assert(f->shared);
+
+    cache_ptr = f->shared->cache;
+
+    assert(cache_ptr);
+    assert(H5_addr_defined(addr));
+    assert(thing);
+    assert(!(pin_entry && unpin_entry));
+
+    /* deleted flag must accompany free_file_space */
+    assert((!free_file_space) || (deleted));
+
+    /* deleted flag must accompany take_ownership */
+    assert((!take_ownership) || (deleted));
+
+    /* can't have both free_file_space & take_ownership */
+    assert(!(free_file_space && take_ownership));
+
+    entry_ptr = (H5C_cache_entry_t *)thing;
+    assert(entry_ptr->addr == addr);
+
+    /* also set the dirtied variable if the dirtied field is set in
+     * the entry.
+     */
+    dirtied |= entry_ptr->dirtied;
+    was_clean = !(entry_ptr->is_dirty);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    /* if the entry has multiple read only protects, just decrement
+     * the ro_ref_counter.  Don't actually unprotect until the ref count
+     * drops to zero.
+     */
+    if (entry_ptr->ro_ref_count > 1) {
+        /* Sanity check */
+        assert(entry_ptr->is_protected);
+        assert(entry_ptr->is_read_only);
+
+        if (dirtied)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+
+        /* Reduce the RO ref count */
+        (entry_ptr->ro_ref_count)--;
+
+        /* Pin or unpin the entry as requested.
*/ + if (pin_entry) { + /* Pin the entry from a client */ + if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") + } + else if (unpin_entry) { + /* Unpin the entry from a client */ + if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") + } /* end if */ + } + else { + if (entry_ptr->is_read_only) { + /* Sanity check */ + assert(entry_ptr->ro_ref_count == 1); + + if (dirtied) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") + + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* When the H5C code is used to implement the metadata cache in the + * PHDF5 case, only the cache on process 0 is allowed to write to file. + * All the other metadata caches must hold dirty entries until they + * are told that the entries are clean. + * + * The clear_on_unprotect flag in the H5C_cache_entry_t structure + * exists to deal with the case in which an entry is protected when + * its cache receives word that the entry is now clean. In this case, + * the clear_on_unprotect flag is set, and the entry is flushed with + * the H5C__FLUSH_CLEAR_ONLY_FLAG. + * + * All this is a bit awkward, but until the metadata cache entries + * are contiguous, with only one dirty flag, we have to let the supplied + * functions deal with the resetting the is_dirty flag. + */ + if (entry_ptr->clear_on_unprotect) { + /* Sanity check */ + assert(entry_ptr->is_dirty); + + entry_ptr->clear_on_unprotect = FALSE; + if (!dirtied) + clear_entry = TRUE; + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + if (!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") + + /* Mark the entry as dirty if appropriate */ + entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); + if (dirtied && entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Check for newly dirtied entry */ + if (was_clean && entry_ptr->is_dirty) { + /* Update index for newly dirtied entry */ + H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL); + + /* If the entry's type has a 'notify' callback send a + * 'entry dirtied' notice now that the entry is fully + * integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") + + /* Propagate the flush dep dirty flag up the flush dependency chain + * if appropriate + */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") + } /* end if */ + /* Check for newly clean entry */ + else if (!was_clean && !entry_ptr->is_dirty) { + + /* If the entry's type has a 'notify' callback send a + * 'entry cleaned' notice now that the entry is fully + * integrated into the cache. 
+             */
+            if (entry_ptr->type->notify &&
+                (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                            "can't notify client about entry dirty flag cleared")
+
+            /* Propagate the flush dep clean flag up the flush dependency chain
+             * if appropriate
+             */
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
+        } /* end else-if */
+
+        /* Pin or unpin the entry as requested. */
+        if (pin_entry) {
+            /* Pin the entry from a client */
+            if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
+        }
+        else if (unpin_entry) {
+            /* Unpin the entry from a client */
+            if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
+        } /* end if */
+
+        /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
+         * the pinned entry list if entry_ptr->is_pinned is TRUE.
+         */
+        H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL);
+
+        entry_ptr->is_protected = FALSE;
+
+        /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
+         * and then add it to the skip list if it isn't there already.
+         */
+        if (entry_ptr->is_dirty) {
+            entry_ptr->flush_marker |= set_flush_marker;
+            if (!entry_ptr->in_slist)
+                /* this is a no-op if cache_ptr->slist_enabled is FALSE */
+                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+        } /* end if */
+
+        /* This implementation of the "deleted" option is a bit inefficient, as
+         * we re-insert the entry to be deleted into the replacement policy
+         * data structures, only to remove it again.  Depending on how often
+         * we do this, we may want to optimize a bit.
+         */
+        if (deleted) {
+            unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG);
+
+            /* verify that the target entry is in the cache. */
+            H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL);
+
+            if (test_entry_ptr == NULL)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+            else if (test_entry_ptr != entry_ptr)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
+                            "hash table contains multiple entries for addr?!?")
+
+            /* Set the 'free file space' flag for the flush, if needed */
+            if (free_file_space)
+                flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
+
+            /* Set the "take ownership" flag for the flush, if needed */
+            if (take_ownership)
+                flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
+
+            /* Delete the entry from the skip list on destroy */
+            flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+
+            assert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist)));
+
+            if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
+        } /* end if */
+#ifdef H5_HAVE_PARALLEL
+        else if (clear_entry) {
+            /* Verify that the target entry is in the cache.
*/ + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL); + + if (test_entry_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") + else if (test_entry_ptr != entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, + "hash table contains multiple entries for addr?!?") + + if (H5C__flush_single_entry(f, entry_ptr, + H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry") + } /* end else if */ +#endif /* H5_HAVE_PARALLEL */ + } + + H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr); + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unprotect() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unsettle_entry_ring + * + * Purpose: Advise the metadata cache that the specified entry's free space + * manager ring is no longer settled (if it was on entry). + * + * If the target free space manager ring is already + * unsettled, do nothing, and return SUCCEED. + * + * If the target free space manager ring is settled, and + * we are not in the process of a file shutdown, mark + * the ring as unsettled, and return SUCCEED. + * + * If the target free space manager is settled, and we + * are in the process of a file shutdown, post an error + * message, and return FAIL. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unsettle_entry_ring(void *_entry) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */ + H5C_t *cache; /* Cache for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry); + assert(entry->ring != H5C_RING_UNDEFINED); + assert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || + (H5C_RING_MDFSM == entry->ring)); + cache = entry->cache_ptr; + assert(cache); + + switch (entry->ring) { + case H5C_RING_USER: + /* Do nothing */ + break; + + case H5C_RING_RDFSM: + if (cache->rdfsm_settled) { + if (cache->flush_in_progress || cache->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") + cache->rdfsm_settled = FALSE; + } /* end if */ + break; + + case H5C_RING_MDFSM: + if (cache->mdfsm_settled) { + if (cache->flush_in_progress || cache->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") + cache->mdfsm_settled = FALSE; + } /* end if */ + break; + + default: + assert(FALSE); /* this should be un-reachable */ + break; + } /* end switch */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unsettle_entry_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C_create_flush_dependency() + * + * Purpose: Initiates a parent<->child entry flush dependency. The parent + * entry must be pinned or protected at the time of call, and must + * have all dependencies removed before the cache can shut down. + * + * Note: Flush dependencies in the cache indicate that a child entry + * must be flushed to the file before its parent. 
(This is + * currently used to implement Single-Writer/Multiple-Reader (SWMR) + * I/O access for data structures in the file). + * + * Creating a flush dependency between two entries will also pin + * the parent entry. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_create_flush_dependency(void *parent_thing, void *child_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */ + H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(parent_entry); + assert(H5_addr_defined(parent_entry->addr)); + assert(child_entry); + assert(H5_addr_defined(child_entry->addr)); + cache_ptr = parent_entry->cache_ptr; + assert(cache_ptr); + assert(cache_ptr == child_entry->cache_ptr); +#ifndef NDEBUG + /* Make sure the parent is not already a parent */ + { + unsigned u; + + for (u = 0; u < child_entry->flush_dep_nparents; u++) + assert(child_entry->flush_dep_parent[u] != parent_entry); + } /* end block */ +#endif + + /* More sanity checks */ + if (child_entry == parent_entry) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself") + if (!(parent_entry->is_protected || parent_entry->is_pinned)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected") + + /* Check for parent not pinned */ + if (!parent_entry->is_pinned) { + /* Sanity check */ + assert(parent_entry->flush_dep_nchildren == 0); + assert(!parent_entry->pinned_from_client); + assert(!parent_entry->pinned_from_cache); + + /* Pin the parent entry */ + parent_entry->is_pinned = TRUE; + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry); + } /* end if */ + + /* Mark the entry as pinned from the cache's action (possibly redundantly) */ + parent_entry->pinned_from_cache = TRUE; + + /* Check if we need to resize the child's parent array */ + if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) { + if (child_entry->flush_dep_parent_nalloc == 0) { + /* Array does not exist yet, allocate it */ + assert(!child_entry->flush_dep_parent); + + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT; + } /* end if */ + else { + /* Resize existing array */ + assert(child_entry->flush_dep_parent); + + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, + 2 * child_entry->flush_dep_parent_nalloc))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc *= 2; + } /* end else */ + cache_ptr->entry_fd_height_change_counter++; + } /* end if */ + + /* Add the dependency to the child's parent array */ + child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry; + child_entry->flush_dep_nparents++; + + /* Increment parent's number of children */ + parent_entry->flush_dep_nchildren++; + + /* Adjust the number of dirty children */ + if (child_entry->is_dirty) { + /* Sanity check */ + assert(parent_entry->flush_dep_ndirty_children
< parent_entry->flush_dep_nchildren); + + parent_entry->flush_dep_ndirty_children++; + + /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag set") + } /* end if */ + + /* Adjust the parent's number of unserialized children. Note + * that it is possible for an entry to be clean and unserialized. + */ + if (!child_entry->image_up_to_date) { + assert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren); + + parent_entry->flush_dep_nunser_children++; + + /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry serialized flag reset") + } /* end if */ + + /* Post-conditions, for successful operation */ + assert(parent_entry->is_pinned); + assert(parent_entry->flush_dep_nchildren > 0); + assert(child_entry->flush_dep_parent); + assert(child_entry->flush_dep_nparents > 0); + assert(child_entry->flush_dep_parent_nalloc > 0); +#ifndef NDEBUG + H5C__assert_flush_dep_nocycle(parent_entry, child_entry); +#endif + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_create_flush_dependency() */ + +/*------------------------------------------------------------------------- + * Function: H5C_destroy_flush_dependency() + * + * Purpose: Terminates a parent<->child entry flush dependency. The + * parent entry must be pinned. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_destroy_flush_dependency(void *parent_thing, void *child_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */ + H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(parent_entry); + assert(H5_addr_defined(parent_entry->addr)); + assert(child_entry); + assert(H5_addr_defined(child_entry->addr)); + cache_ptr = parent_entry->cache_ptr; + assert(cache_ptr); + assert(cache_ptr == child_entry->cache_ptr); + + /* Usage checks */ + if (!parent_entry->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned") + if (NULL == child_entry->flush_dep_parent) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Child entry doesn't have a flush dependency parent array") + if (0 == parent_entry->flush_dep_nchildren) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Parent entry flush dependency ref. count has no child dependencies") + + /* Search for parent in child's parent array. This is a linear search + * because we do not expect large numbers of parents.
If this changes, we + * may wish to change the parent array to a skip list */ + for (u = 0; u < child_entry->flush_dep_nparents; u++) + if (child_entry->flush_dep_parent[u] == parent_entry) + break; + if (u == child_entry->flush_dep_nparents) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Parent entry isn't a flush dependency parent for child entry") + + /* Remove parent entry from child's parent array */ + if (u < (child_entry->flush_dep_nparents - 1)) + memmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1], + (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0])); + child_entry->flush_dep_nparents--; + + /* Adjust parent entry's nchildren and unpin parent if it goes to zero */ + parent_entry->flush_dep_nchildren--; + if (0 == parent_entry->flush_dep_nchildren) { + /* Sanity check */ + assert(parent_entry->pinned_from_cache); + + /* Check if we should unpin parent entry now */ + if (!parent_entry->pinned_from_client) + if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry") + + /* Mark the entry as unpinned from the cache's action */ + parent_entry->pinned_from_cache = FALSE; + } /* end if */ + + /* Adjust parent entry's ndirty_children */ + if (child_entry->is_dirty) { + /* Sanity check */ + assert(parent_entry->flush_dep_ndirty_children > 0); + + parent_entry->flush_dep_ndirty_children--; + + /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag reset") + } /* end if */ + + /* adjust parent entry's number of unserialized children */ + if (!child_entry->image_up_to_date) { + assert(parent_entry->flush_dep_nunser_children > 0); + + parent_entry->flush_dep_nunser_children--; + + /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry serialized flag set") + } /* end if */ + + /* Shrink or free the parent array if appropriate */ + if (child_entry->flush_dep_nparents == 0) { + child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent); + child_entry->flush_dep_parent_nalloc = 0; + } /* end if */ + else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT && + child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) { + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, + child_entry->flush_dep_parent_nalloc / 4))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc /= 4; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_destroy_flush_dependency() */ + +/*------------------------------------------------------------------------- + * Function: H5C_expunge_entry + * + * Purpose: Expunge an entry from the cache without writing it to disk + * even if it is dirty. The entry may not be either pinned or + * protected. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = NULL; + unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(type); + assert(H5_addr_defined(addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Look for entry in cache */ + H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL); + if ((entry_ptr == NULL) || (entry_ptr->type != type)) + /* the target doesn't exist in the cache, so we are done. */ + HGOTO_DONE(SUCCEED); + + assert(entry_ptr->addr == addr); + assert(entry_ptr->type == type); + + /* Check for entry being pinned or protected */ + if (entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected") + if (entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned") + + /* If we get this far, call H5C__flush_single_entry() with the + * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG. + * This will clear the entry, and then delete it from the cache. + */ + + /* Pass along 'free file space' flag */ + flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG); + + /* Delete the entry from the skip list on destroy */ + flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; + + if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit"); +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_expunge_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_remove_entry + * + * Purpose: Remove an entry from the cache. Must not be protected, pinned, + * dirty, or involved in flush dependencies, etc.
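In contrast to H5C_expunge_entry() above, which will clear a dirty entry itself, the routine below only detaches an entry that is already clean and fully disentangled. A caller-side sketch of that contract (illustrative only, restating the checks performed in the body):

    /* The entry must already be clean, unprotected, unpinned, and free
     * of flush dependencies in either direction:
     */
    assert(!entry->is_dirty && !entry->is_protected && !entry->is_pinned);
    assert(0 == entry->flush_dep_nparents && 0 == entry->flush_dep_nchildren);

    if (H5C_remove_entry(entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from cache")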
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_remove_entry(void *_entry) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */ + H5C_t *cache; /* Cache for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + assert(entry); + assert(entry->ring != H5C_RING_UNDEFINED); + cache = entry->cache_ptr; + assert(cache); + + /* Check for error conditions */ + if (entry->is_dirty) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache") + if (entry->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache") + if (entry->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache") + /* NOTE: If these two errors are getting tripped because the entry is + * in a flush dependency with a freedspace entry, move the checks + * after the "before evict" message is sent, and add the + * "child being evicted" message to the "before evict" notify + * section below. QAK - 2017/08/03 + */ + if (entry->flush_dep_nparents > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, + "can't remove entry with flush dependency parents from cache") + if (entry->flush_dep_nchildren > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, + "can't remove entry with flush dependency children from cache") + + /* Additional internal cache consistency checks */ + assert(!entry->in_slist); + assert(!entry->flush_marker); + assert(!entry->flush_in_progress); + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C__flush_single_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + + /* Update stats, as if we are "destroying" and taking ownership of the entry */ + H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE); + + /* If the entry's type has a 'notify' callback, send a 'before eviction' + * notice while the entry is still fully integrated in the cache. + */ + if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") + + /* Update the cache internal data structures as appropriate for a destroy. + * Specifically: + * 1) Delete it from the index + * 2) Delete it from the collective read access list + * 3) Update the replacement policy for eviction + * 4) Remove it from the tag list for this object + */ + + H5C__DELETE_FROM_INDEX(cache, entry, FAIL); + +#ifdef H5_HAVE_PARALLEL + /* Check for collective read access flag */ + if (entry->coll_access) { + entry->coll_access = FALSE; + H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL); + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL); + + /* Remove entry from tag list */ + if (H5C__untag_entry(cache, entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list"); + + /* Increment entries_removed_counter and set last_entry_removed_ptr. + * As we may be about to free the entry, recall that last_entry_removed_ptr + * must NEVER be dereferenced. + * + * Recall that these fields are maintained to allow functions that perform + * scans of lists of entries to detect the unexpected removal of entries + * (via expunge, eviction, or take ownership at present), so that they can + * re-start their scans if necessary.
+ * + * Also check if the entry we are watching for removal is being + * removed (usually the 'next' entry for an iteration) and reset + * it to indicate that it was removed. + */ + cache->entries_removed_counter++; + cache->last_entry_removed_ptr = entry; + if (entry == cache->entry_watched_for_removal) + cache->entry_watched_for_removal = NULL; + + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. + * + * Now clean up internal cache fields if appropriate. + */ + + /* Free the buffer for the on disk image */ + if (entry->image_ptr != NULL) + entry->image_ptr = H5MM_xfree(entry->image_ptr); + + /* Reset the pointer to the cache the entry is within */ + entry->cache_ptr = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_remove_entry() */ diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c index 1b55080..ed161e8 100644 --- a/src/H5Cepoch.c +++ b/src/H5Cepoch.c @@ -13,19 +13,26 @@ /*------------------------------------------------------------------------- * * Created: H5Cepoch.c - * June 5 2004 - * Quincey Koziol * - * Purpose: Metadata cache epoch callbacks. + * Purpose: Metadata cache epoch callbacks * *------------------------------------------------------------------------- */ +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Cmodule.h" /* This source code file is part of the H5C module */ + /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5ACprivate.h" /* Metadata cache */ +#include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata Cache */ +#include "H5Cpkg.h" /* Cache */ +#include "H5Eprivate.h" /* Error Handling */ +#include "H5Fprivate.h" /* Files */ /****************/ /* Local Macros */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 0d0e42b..96d22d7 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -71,10 +71,6 @@ #define H5C__MDCI_MAX_FD_CHILDREN USHRT_MAX #define H5C__MDCI_MAX_FD_PARENTS USHRT_MAX -/* Values for image entry magic field */ -#define H5C_IMAGE_ENTRY_T_MAGIC 0x005CAC08 -#define H5C_IMAGE_ENTRY_T_BAD_MAGIC 0xBeefDead - /* Maximum ring allowed in image */ #define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM @@ -89,35 +85,27 @@ #if H5C_COLLECT_CACHE_STATS /* clang-format off */ #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ - (cache_ptr)->images_created++; +do { \ + (cache_ptr)->images_created++; \ +} while (0) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ -{ \ +do { \ /* make sure image len is still good */ \ assert((cache_ptr)->image_len > 0); \ (cache_ptr)->images_read++; \ -} +} while (0) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ -{ \ +do { \ /* make sure image len is still good */ \ assert((cache_ptr)->image_len > 0); \ (cache_ptr)->images_loaded++; \ (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ -} -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ -{ \ - (cache_ptr)->prefetches++; \ - if (dirty) \ - (cache_ptr)->dirty_prefetches++; \ -} -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ - (cache_ptr)->prefetch_hits++; +} while (0) /* clang-format on */ #else /* H5C_COLLECT_CACHE_STATS */ #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ /******************/ @@ 
-135,9 +123,7 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, c #ifndef NDEBUG /* only used in assertions */ static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num); -#endif /* NDEBUG */ /* only used in assertions */ -static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, - H5C_cache_entry_t **fd_children); +#endif static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num); static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr); @@ -169,7 +155,6 @@ H5FL_DEFINE(H5C_cache_entry_t); /*******************/ /*------------------------------------------------------------------------- - * * Function: H5C_cache_image_pending() * * Purpose: Tests to see if the load of a metadata cache image @@ -192,7 +177,6 @@ H5C_cache_image_pending(const H5C_t *cache_ptr) /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); ret_value = (cache_ptr->load_image && !cache_ptr->image_loaded); @@ -230,7 +214,6 @@ H5C_cache_image_status(H5F_t *f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr) assert(f->shared); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(load_ci_ptr); assert(write_ci_ptr); @@ -268,7 +251,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) assert(f->shared); assert(cache_ptr == f->shared->cache); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->image_ctl.generate_image); assert(cache_ptr->num_entries_in_image > 0); @@ -314,7 +296,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) fake_cache_ptr = (H5C_t *)H5MM_malloc(sizeof(H5C_t)); assert(fake_cache_ptr); - fake_cache_ptr->magic = H5C__H5C_T_MAGIC; /* needed for sanity checks */ fake_cache_ptr->image_len = cache_ptr->image_len; @@ -330,7 +311,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) assert(fake_cache_ptr->image_entries); for (u = 0; u < fake_cache_ptr->num_entries_in_image; u++) { - fake_cache_ptr->image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; fake_cache_ptr->image_entries[u].image_ptr = NULL; /* touch up f->shared->cache to satisfy sanity checks... */ @@ -391,8 +371,8 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries); fake_cache_ptr = (H5C_t *)H5MM_xfree(fake_cache_ptr); - } /* end block */ -#endif /* NDEBUG */ + } /* end block */ +#endif done: FUNC_LEAVE_NOAPI(ret_value) @@ -420,7 +400,6 @@ H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr) assert(f->shared); assert(cache_ptr == f->shared->cache); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Construct cache image */ if (H5C__construct_cache_image_buffer(f, cache_ptr) < 0) @@ -447,375 +426,6 @@ done: } /* H5C__generate_cache_image() */ /*------------------------------------------------------------------------- - * Function: H5C__deserialize_prefetched_entry() - * - * Purpose: Deserialize the supplied prefetched entry entry, and return - * a pointer to the deserialized entry in *entry_ptr_ptr. - * If successful, remove the prefetched entry from the cache, - * and free it. Insert the deserialized entry into the cache. 
- * - * Note that the on disk image of the entry is not freed -- - * a pointer to it is stored in the deserialized entries' - * image_ptr field, and its image_up_to_date field is set to - * TRUE unless the entry is dirtied by the deserialize call. - * - * If the prefetched entry is a flush dependency child, - * destroy that flush dependency prior to calling the - * deserialize callback. If appropriate, the flush dependency - * relationship will be recreated by the cache client. - * - * If the prefetched entry is a flush dependency parent, - * destroy the flush dependency relationship with all its - * children. As all these children must be prefetched entries, - * recreate these flush dependency relationships with - * deserialized entry after it is inserted in the cache. - * - * Since deserializing a prefetched entry is semantically - * equivalent to a load, issue an entry loaded nofification - * if the notify callback is defined. - * - * Return: SUCCEED on success, and FAIL on failure. - * - * Note that *entry_ptr_ptr is undefined on failure. - * - * Programmer: John Mainzer, 8/10/15 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, - const H5C_class_t *type, haddr_t addr, void *udata) -{ - hbool_t dirty = FALSE; /* Flag indicating whether thing was - * dirtied during deserialize - */ - size_t len; /* Size of image in file */ - void *thing = NULL; /* Pointer to thing loaded */ - H5C_cache_entry_t *pf_entry_ptr; /* pointer to the prefetched entry */ - /* supplied in *entry_ptr_ptr. */ - H5C_cache_entry_t *ds_entry_ptr; /* Alias for thing loaded, as cache - * entry - */ - H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically */ - /* allocated array of pointers to */ - /* the flush dependency children of */ - /* the prefetched entry, or NULL if */ - /* that array does not exist. */ - unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* sanity checks */ - assert(f); - assert(f->shared); - assert(f->shared->cache); - assert(f->shared->cache == cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(entry_ptr_ptr); - assert(*entry_ptr_ptr); - pf_entry_ptr = *entry_ptr_ptr; - assert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(pf_entry_ptr->type); - assert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - assert(pf_entry_ptr->prefetched); - assert(pf_entry_ptr->image_up_to_date); - assert(pf_entry_ptr->image_ptr); - assert(pf_entry_ptr->size > 0); - assert(pf_entry_ptr->addr == addr); - assert(type); - assert(type->id == pf_entry_ptr->prefetch_type_id); - assert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - - /* verify absence of prohibited or unsupported type flag combinations */ - assert(!(type->flags & H5C__CLASS_SKIP_READS)); - - /* Can't see how skip reads could be usefully combined with - * either the speculative read flag. Hence disallow. - */ - assert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); - assert(H5_addr_defined(addr)); - assert(type->get_initial_load_size); - assert(type->deserialize); - - /* if *pf_entry_ptr is a flush dependency child, destroy all such - * relationships now. The client will restore the relationship(s) with - * the deserialized entry if appropriate. 
- */ - assert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents); - for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) { - assert(pf_entry_ptr->flush_dep_parent); - assert(pf_entry_ptr->flush_dep_parent[i]); - assert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0); - assert(pf_entry_ptr->fd_parent_addrs); - assert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]); - - if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency") - - pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF; - } /* end for */ - assert(pf_entry_ptr->flush_dep_nparents == 0); - - /* If *pf_entry_ptr is a flush dependency parent, destroy its flush - * dependency relationships with all its children (which must be - * prefetched entries as well). - * - * These flush dependency relationships will have to be restored - * after the deserialized entry is inserted into the cache in order - * to transfer these relationships to the new entry. Hence save the - * pointers to the flush dependency children of *pf_enty_ptr for later - * use. - */ - if (pf_entry_ptr->fd_child_count > 0) { - if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc( - sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1)))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array") - - if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "can't destroy pf entry child flush dependency(s).") - } /* end if */ - - /* Since the size of the on disk image is known exactly, there is - * no need for either a call to the get_initial_load_size() callback, - * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set. - * Similarly, there is no need to clamp possible reads beyond - * EOF. - */ - len = pf_entry_ptr->size; - - /* Deserialize the prefetched on-disk image of the entry into the - * native memory form - */ - if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image") - ds_entry_ptr = (H5C_cache_entry_t *)thing; - - /* In general, an entry should be clean just after it is loaded. - * - * However, when this code is used in the metadata cache, it is - * possible that object headers will be dirty at this point, as - * the deserialize function will alter object headers if necessary to - * fix an old bug. - * - * In the following assert: - * - * assert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); - * - * note that type ids 5 & 6 are associated with object headers in the - * metadata cache. - * - * When we get to using H5C for other purposes, we may wish to - * tighten up the assert so that the loophole only applies to the - * metadata cache. - * - * Note that at present, dirty can't be set to true with prefetched - * entries. However this may change, so include this functionality - * against that possibility. - * - * Also, note that it is possible for a prefetched entry to be dirty -- - * hence the value assigned to ds_entry_ptr->is_dirty below. 
- */ - - assert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - - ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - ds_entry_ptr->cache_ptr = f->shared->cache; - ds_entry_ptr->addr = addr; - ds_entry_ptr->size = len; - assert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE); - ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr; - ds_entry_ptr->image_up_to_date = !dirty; - ds_entry_ptr->type = type; - ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty; - ds_entry_ptr->dirtied = FALSE; - ds_entry_ptr->is_protected = FALSE; - ds_entry_ptr->is_read_only = FALSE; - ds_entry_ptr->ro_ref_count = 0; - ds_entry_ptr->is_pinned = FALSE; - ds_entry_ptr->in_slist = FALSE; - ds_entry_ptr->flush_marker = FALSE; -#ifdef H5_HAVE_PARALLEL - ds_entry_ptr->clear_on_unprotect = FALSE; - ds_entry_ptr->flush_immediately = FALSE; - ds_entry_ptr->coll_access = FALSE; -#endif /* H5_HAVE_PARALLEL */ - ds_entry_ptr->flush_in_progress = FALSE; - ds_entry_ptr->destroy_in_progress = FALSE; - - ds_entry_ptr->ring = pf_entry_ptr->ring; - - /* Initialize flush dependency height fields */ - ds_entry_ptr->flush_dep_parent = NULL; - ds_entry_ptr->flush_dep_nparents = 0; - ds_entry_ptr->flush_dep_parent_nalloc = 0; - ds_entry_ptr->flush_dep_nchildren = 0; - ds_entry_ptr->flush_dep_ndirty_children = 0; - ds_entry_ptr->flush_dep_nunser_children = 0; - - /* Initialize fields supporting the hash table: */ - ds_entry_ptr->ht_next = NULL; - ds_entry_ptr->ht_prev = NULL; - ds_entry_ptr->il_next = NULL; - ds_entry_ptr->il_prev = NULL; - - /* Initialize fields supporting replacement policies: */ - ds_entry_ptr->next = NULL; - ds_entry_ptr->prev = NULL; -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - ds_entry_ptr->aux_next = NULL; - ds_entry_ptr->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#ifdef H5_HAVE_PARALLEL - pf_entry_ptr->coll_next = NULL; - pf_entry_ptr->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* Initialize cache image related fields */ - ds_entry_ptr->include_in_image = FALSE; - ds_entry_ptr->lru_rank = 0; - ds_entry_ptr->image_dirty = FALSE; - ds_entry_ptr->fd_parent_count = 0; - ds_entry_ptr->fd_parent_addrs = NULL; - ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count; - ds_entry_ptr->fd_dirty_child_count = 0; - ds_entry_ptr->image_fd_height = 0; - ds_entry_ptr->prefetched = FALSE; - ds_entry_ptr->prefetch_type_id = 0; - ds_entry_ptr->age = 0; - ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty; -#ifndef NDEBUG /* debugging field */ - ds_entry_ptr->serialization_count = 0; -#endif /* NDEBUG */ - - H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr); - - /* Apply to to the newly deserialized entry */ - if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") - - /* We have successfully deserialized the prefetched entry. - * - * Before we return a pointer to the deserialized entry, we must remove - * the prefetched entry from the cache, discard it, and replace it with - * the deserialized entry. Note that we do not free the prefetched - * entries image, as that has been transferred to the deserialized - * entry. - * - * Also note that we have not yet restored any flush dependencies. This - * must wait until the deserialized entry is inserted in the cache. - * - * To delete the prefetched entry from the cache: - * - * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already - * transferred the buffer containing the image to *ds_entry_ptr, - * this is not a memory leak. 
- * - * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG - * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set. - */ - pf_entry_ptr->image_ptr = NULL; - - if (pf_entry_ptr->is_dirty) { - assert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) || - ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist))); - - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - } /* end if */ - - if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry") - -#ifndef NDEGUG /* verify deletion */ - H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL); - - assert(NULL == pf_entry_ptr); -#endif /* NDEBUG */ - - /* Insert the deserialized entry into the cache. */ - H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL) - - assert(!ds_entry_ptr->in_slist); - if (ds_entry_ptr->is_dirty) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL) - - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL) - - /* Deserializing a prefetched entry is the conceptual equivalent of - * loading it from file. If the deserialized entry has a notify callback, - * send an "after load" notice now that the deserialized entry is fully - * integrated into the cache. - */ - if (ds_entry_ptr->type->notify && - (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache") - - /* Restore flush dependencies with the flush dependency children of - * of the prefetched entry. Note that we must protect *ds_entry_ptr - * before the call to avoid triggering sanity check failures, and - * then unprotect it afterwards. - */ - i = 0; - if (fd_children != NULL) { - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL) - ds_entry_ptr->is_protected = TRUE; - while (fd_children[i] != NULL) { - /* Sanity checks */ - assert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert((fd_children[i])->prefetched); - assert((fd_children[i])->fd_parent_count > 0); - assert((fd_children[i])->fd_parent_addrs); - -#ifndef NDEBUG - { - int j; - hbool_t found; - - j = 0; - found = FALSE; - while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) { - if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr) - found = TRUE; - - j++; - } /* end while */ - assert(found); - } -#endif /* NDEBUG */ - - if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency") - - i++; - } /* end while */ - - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL); - ds_entry_ptr->is_protected = FALSE; - } /* end if ( fd_children != NULL ) */ - assert((unsigned)i == ds_entry_ptr->fd_child_count); - - ds_entry_ptr->fd_child_count = 0; - H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) - - /* finally, pass ds_entry_ptr back to the caller */ - *entry_ptr_ptr = ds_entry_ptr; - -done: - if (fd_children) - fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children); - - /* Release resources on error */ - if (FAIL == ret_value) - if (thing && type->free_icr(thing) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__deserialize_prefetched_entry() */ - -/*------------------------------------------------------------------------- * Function: H5C__free_image_entries_array * * Purpose: If the image entries array exists, free the image @@ -836,7 +446,6 @@ 
H5C__free_image_entries_array(H5C_t *cache_ptr) /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->image_ctl.generate_image); assert(cache_ptr->index_len == 0); @@ -853,7 +462,6 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) /* Sanity checks */ assert(ie_ptr); - assert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); assert(ie_ptr->image_ptr); /* Free the parent addrs array if appropriate */ @@ -867,9 +475,6 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) /* Free the image */ ie_ptr->image_ptr = H5MM_xfree(ie_ptr->image_ptr); - - /* Set magic field to bad magic so we can detect freed entries */ - ie_ptr->magic = H5C_IMAGE_ENTRY_T_BAD_MAGIC; } /* end for */ /* Free the image entries array */ @@ -880,7 +485,7 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) } /* H5C__free_image_entries_array() */ /*------------------------------------------------------------------------- - * Function: H5C_get_cache_image_config + * Function: H5C__get_cache_image_config * * Purpose: Copy the current configuration for cache image generation * on file close into the instance of H5C_cache_image_ctl_t @@ -891,13 +496,13 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) *------------------------------------------------------------------------- */ herr_t -H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) +H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) { herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry") if (config_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad config_ptr on entry") @@ -906,72 +511,7 @@ H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_get_cache_image_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_image_stats - * - * Purpose: Prints statistics specific to the cache image. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 10/26/15 - * - *------------------------------------------------------------------------- - */ -herr_t -#if H5C_COLLECT_CACHE_STATS -H5C_image_stats(H5C_t *cache_ptr, hbool_t print_header) -#else /* H5C_COLLECT_CACHE_STATS */ -H5C_image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header) -#endif /* H5C_COLLECT_CACHE_STATS */ -{ -#if H5C_COLLECT_CACHE_STATS - int i; - int64_t total_hits = 0; - int64_t total_misses = 0; - double hit_rate; - double prefetch_use_rate; -#endif /* H5C_COLLECT_CACHE_STATS */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") - -#if H5C_COLLECT_CACHE_STATS - for (i = 0; i <= cache_ptr->max_type_id; i++) { - total_hits += cache_ptr->hits[i]; - total_misses += cache_ptr->misses[i]; - } /* end for */ - - if ((total_hits > 0) || (total_misses > 0)) - hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses)); - else - hit_rate = 0.0; - - if (cache_ptr->prefetches > 0) - prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches)); - else - prefetch_use_rate = 0.0; - - if (print_header) { - fprintf(stdout, "\nhit prefetches prefetch image pf hit\n"); - fprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n"); - } /* end if */ - - fprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate, - (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches), - (long long)(cache_ptr->prefetch_hits), (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]), - (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]), - (long long)(cache_ptr->last_image_size), prefetch_use_rate); -#endif /* H5C_COLLECT_CACHE_STATS */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_image_stats() */ +} /* H5C__get_cache_image_config() */ /*------------------------------------------------------------------------- * Function: H5C__read_cache_image @@ -1002,8 +542,7 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; int mpi_result; - if ((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) { - assert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); + if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) { #endif /* H5_HAVE_PARALLEL */ /* Read the buffer (if serial access, or rank 0 of parallel access) */ @@ -1014,7 +553,7 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) cache_ptr->image_buffer) < 0) HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block") - H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) + H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr); #ifdef H5_HAVE_PARALLEL if (aux_ptr) { @@ -1064,7 +603,6 @@ H5C__load_cache_image(H5F_t *f) assert(f->shared); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* If the image address is defined, load the image, decode it, * and insert its contents into the metadata cache. @@ -1098,7 +636,7 @@ H5C__load_cache_image(H5F_t *f) /* Update stats -- must do this now, as we are about * to discard the size of the cache image. 
*/ - H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) + H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr); cache_ptr->image_loaded = TRUE; } /* end if */ @@ -1156,7 +694,6 @@ H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_ assert(f->shared); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Set information needed to load cache image */ cache_ptr->image_addr = addr; @@ -1279,7 +816,6 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) assert(f->shared->cache); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(image_generated); /* If the file is opened and closed without any access to @@ -1494,8 +1030,8 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.") /* Sort the entries */ - HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image, - sizeof(H5C_image_entry_t), H5C__image_entry_cmp); + qsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image, + sizeof(H5C_image_entry_t), H5C__image_entry_cmp); } /* end if */ else { /* cancel creation of metadata cache image */ assert(cache_ptr->image_entries == NULL); @@ -1559,7 +1095,7 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl assert(f->shared->cache == f->shared->cache); /* Check arguments */ - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry") /* Validate the config: */ @@ -1745,7 +1281,6 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t * /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(buf); assert(*buf); @@ -1842,13 +1377,11 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint assert(f->shared); assert(cache_ptr == f->shared->cache); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(buf); assert(*buf); assert(entry_num < cache_ptr->num_entries_in_image); ie_ptr = &(cache_ptr->image_entries[entry_num]); assert(ie_ptr); - assert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); /* Get pointer to buffer */ p = *buf; @@ -1893,7 +1426,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint assert((in_lru && lru_rank >= 0) || (!in_lru && lru_rank == -1)); /* Decode entry offset */ - H5_addr_decode(f, &p, &addr); + H5F_addr_decode(f, &p, &addr); if (!H5_addr_defined(addr)) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry offset") @@ -1916,7 +1449,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd parent addrs buffer") for (i = 0; i < fd_parent_count; i++) { - H5_addr_decode(f, &p, &(fd_parent_addrs[i])); + H5F_addr_decode(f, &p, &(fd_parent_addrs[i])); if (!H5_addr_defined(fd_parent_addrs[i])) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid flush dependency parent offset") } /* end for */ @@ -1954,135 +1487,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__decode_cache_image_entry() */ -#endif /* NDEBUG */ - -/*------------------------------------------------------------------------- - * Function: H5C__destroy_pf_entry_child_flush_deps() - * - * Purpose: Destroy all flush 
dependencies in this the supplied - * prefetched entry is the parent. Note that the children - * in these flush dependencies must be prefetched entries as - * well. - * - * As this action is part of the process of transferring all - * such flush dependencies to the deserialized version of the - * prefetched entry, ensure that the data necessary to complete - * the transfer is retained. - * - * Note: The current implementation of this function is - * quite inefficient -- mostly due to the current - * implementation of flush dependencies. This should - * be fixed at some point. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 8/11/15 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, - H5C_cache_entry_t **fd_children) -{ - H5C_cache_entry_t *entry_ptr; - unsigned entries_visited = 0; - int fd_children_found = 0; - hbool_t found; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); - assert(pf_entry_ptr); - assert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(pf_entry_ptr->type); - assert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - assert(pf_entry_ptr->prefetched); - assert(pf_entry_ptr->fd_child_count > 0); - assert(fd_children); - - /* Scan each entry on the index list */ - entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - /* Here we look at entry_ptr->flush_dep_nparents and not - * entry_ptr->fd_parent_count as it is possible that some - * or all of the prefetched flush dependency child relationships - * have already been destroyed. - */ - if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) { - unsigned u; /* Local index variable */ - - /* Re-init */ - u = 0; - found = FALSE; - - /* Sanity checks */ - assert(entry_ptr->type); - assert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - assert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents); - assert(entry_ptr->fd_parent_addrs); - assert(entry_ptr->flush_dep_parent); - - /* Look for correct entry */ - while (!found && (u < entry_ptr->fd_parent_count)) { - /* Sanity check entry */ - assert(entry_ptr->flush_dep_parent[u]); - assert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - /* Correct entry? */ - if (pf_entry_ptr == entry_ptr->flush_dep_parent[u]) - found = TRUE; - - u++; - } /* end while */ - - if (found) { - assert(NULL == fd_children[fd_children_found]); - - /* Remove flush dependency */ - fd_children[fd_children_found] = entry_ptr; - fd_children_found++; - if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "can't destroy pf entry child flush dependency") - -#ifndef NDEBUG - /* Sanity check -- verify that the address of the parent - * appears in entry_ptr->fd_parent_addrs. Must do a search, - * as with flush dependency creates and destroys, - * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent - * can list parents in different order. 
- */ - found = FALSE; - u = 0; - while (!found && u < entry_ptr->fd_parent_count) { - if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u]) - found = TRUE; - u++; - } /* end while */ - assert(found); -#endif /* NDEBUG */ - } /* end if */ - } /* end if */ - - entries_visited++; - entry_ptr = entry_ptr->il_next; - } /* end while */ - - /* Post-op sanity checks */ - assert(NULL == fd_children[fd_children_found]); - assert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count); - assert(entries_visited == cache_ptr->index_len); - assert(!pf_entry_ptr->is_pinned); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__destroy_pf_entry_child_flush_deps() */ +#endif /*------------------------------------------------------------------------- * Function: H5C__encode_cache_image_header() @@ -2108,7 +1513,6 @@ H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t * /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->image_ctl.generate_image); assert(cache_ptr->index_len == 0); @@ -2184,7 +1588,6 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne assert(f->shared); assert(cache_ptr == f->shared->cache); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->image_ctl.generate_image); assert(cache_ptr->index_len == 0); @@ -2192,7 +1595,6 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne assert(*buf); assert(entry_num < cache_ptr->num_entries_in_image); ie_ptr = &(cache_ptr->image_entries[entry_num]); - assert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); /* Get pointer to buffer to encode into */ p = *buf; @@ -2238,7 +1640,7 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne INT32ENCODE(p, ie_ptr->lru_rank); /* Encode entry offset */ - H5_addr_encode(f, &p, ie_ptr->addr); + H5F_addr_encode(f, &p, ie_ptr->addr); /* Encode entry length */ H5F_ENCODE_LENGTH(f, p, ie_ptr->size); @@ -2249,7 +1651,7 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne /* Encode dependency parent offsets -- if any */ for (u = 0; u < ie_ptr->fd_parent_count; u++) - H5_addr_encode(f, &p, ie_ptr->fd_parent_addrs[u]); + H5F_addr_encode(f, &p, ie_ptr->fd_parent_addrs[u]); /* Copy entry image */ H5MM_memcpy(p, ie_ptr->image_ptr, ie_ptr->size); @@ -2312,18 +1714,19 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; H5C_cache_entry_t *parent_ptr; - unsigned entries_removed_from_image = 0; - unsigned external_parent_fd_refs_removed = 0; - unsigned external_child_fd_refs_removed = 0; - hbool_t done = FALSE; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; +#ifndef NDEBUG + unsigned entries_removed_from_image = 0; + unsigned external_parent_fd_refs_removed = 0; + unsigned external_child_fd_refs_removed = 0; +#endif + hbool_t done = FALSE; + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE /* sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Remove from the cache image all dirty entries that are * flush dependency children of dirty entries that are not in the @@ -2337,8 +1740,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) done = TRUE; entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - assert(entry_ptr->magic == 
H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Should this entry be in the image */ if (entry_ptr->image_dirty && entry_ptr->include_in_image && (entry_ptr->fd_parent_count > 0)) { assert(entry_ptr->flush_dep_parent != NULL); @@ -2346,14 +1747,15 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity check parent */ - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->ring == parent_ptr->ring); if (parent_ptr->is_dirty && !parent_ptr->include_in_image && entry_ptr->include_in_image) { /* Must remove child from image -- only do this once */ +#ifndef NDEBUG entries_removed_from_image++; +#endif entry_ptr->include_in_image = FALSE; } /* end if */ } /* for */ @@ -2383,7 +1785,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity check parent */ - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->ring == parent_ptr->ring); if (parent_ptr->include_in_image) { @@ -2396,7 +1797,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr->fd_dirty_child_count--; } /* end if */ +#ifndef NDEBUG external_child_fd_refs_removed++; +#endif } /* end if */ } /* for */ } /* end if */ @@ -2410,7 +1813,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity check parent */ - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->ring == parent_ptr->ring); if (!parent_ptr->include_in_image) { @@ -2421,7 +1823,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) assert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]); entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF; +#ifndef NDEBUG external_parent_fd_refs_removed++; +#endif } /* end if */ } /* for */ @@ -2477,7 +1881,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) for (u = 0; u < entry_ptr->fd_parent_count; u++) { parent_ptr = entry_ptr->flush_dep_parent[u]; - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1); } /* end for */ @@ -2542,7 +1945,6 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, /* Sanity checks */ assert(entry_ptr); - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->include_in_image); assert((entry_ptr->image_fd_height == 0) || (entry_ptr->image_fd_height < fd_height)); assert(((fd_height == 0) && (entry_ptr->fd_child_count == 0)) || @@ -2557,7 +1959,6 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, H5C_cache_entry_t *parent_ptr; parent_ptr = entry_ptr->flush_dep_parent[u]; - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= fd_height) H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, fd_height + 1); @@ -2582,16 +1983,17 @@ static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; - H5C_image_entry_t *image_entries = NULL; - uint32_t entries_visited = 0; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + H5C_image_entry_t *image_entries = NULL; +#ifndef NDEBUG + uint32_t entries_visited = 0; +#endif + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return 
value */ FUNC_ENTER_PACKAGE /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->pl_len == 0); assert(cache_ptr->num_entries_in_image > 0); @@ -2604,7 +2006,6 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) /* Initialize (non-zero/NULL/FALSE) fields */ for (u = 0; u <= cache_ptr->num_entries_in_image; u++) { - image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; image_entries[u].addr = HADDR_UNDEF; image_entries[u].ring = H5C_RING_UNDEFINED; image_entries[u].type_id = -1; @@ -2614,8 +2015,6 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) u = 0; entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->include_in_image) { /* Since we have already serialized the cache, the following * should hold. @@ -2668,7 +2067,9 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) assert(u <= cache_ptr->num_entries_in_image); } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ @@ -2718,14 +2119,16 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; hbool_t include_in_image; - unsigned entries_visited = 0; - int lru_rank = 1; - uint32_t num_entries_tentatively_in_image = 0; - uint32_t num_entries_in_image = 0; - size_t image_len; - size_t entry_header_len; - size_t fd_parents_list_len; - herr_t ret_value = SUCCEED; /* Return value */ + int lru_rank = 1; +#ifndef NDEBUG + unsigned entries_visited = 0; + uint32_t num_entries_tentatively_in_image = 0; +#endif + uint32_t num_entries_in_image = 0; + size_t image_len; + size_t entry_header_len; + size_t fd_parents_list_len; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2734,7 +2137,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) assert(f->shared); assert(f->shared->sblock); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); assert(cache_ptr->pl_len == 0); @@ -2747,8 +2149,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) /* Scan each entry on the index list */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Since we have already serialized the cache, the following * should hold. */ @@ -2826,10 +2226,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children; } /* end if */ +#ifndef NDEBUG num_entries_tentatively_in_image++; +#endif } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ assert(entries_visited == cache_ptr->index_len); @@ -2860,15 +2264,15 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) if (H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?") - /* At this point, all entries that will appear in the cache - * image should be marked correctly. Compute the size of the - * cache image. - */ + /* At this point, all entries that will appear in the cache + * image should be marked correctly. Compute the size of the + * cache image. 
+ */ +#ifndef NDEBUG entries_visited = 0; - entry_ptr = cache_ptr->il_head; +#endif + entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->include_in_image) { if (entry_ptr->fd_parent_count > 0) fd_parents_list_len = (size_t)(H5F_SIZEOF_ADDR(f) * entry_ptr->fd_parent_count); @@ -2879,7 +2283,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) num_entries_in_image++; } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ assert(entries_visited == cache_ptr->index_len); @@ -2897,7 +2303,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) #endif cache_ptr->num_entries_in_image = num_entries_in_image; - entries_visited = 0; +#ifndef NDEBUG + entries_visited = 0; +#endif /* Now scan the LRU list to set the lru_rank fields of all entries * on the LRU. @@ -2911,7 +2319,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) */ entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->type != NULL); /* to avoid confusion, don't set lru_rank on epoch markers. @@ -2930,7 +2337,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) lru_rank++; } /* end else-if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->next; } /* end while */ assert(entries_visited == cache_ptr->LRU_list_len); @@ -2971,7 +2380,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) assert(f->shared); assert(cache_ptr == f->shared->cache); assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->image_buffer); assert(cache_ptr->image_len > 0); @@ -3003,16 +2411,16 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) */ /* Insert the prefetched entry in the index */ - H5C__INSERT_IN_INDEX(cache_ptr, pf_entry_ptr, FAIL) + H5C__INSERT_IN_INDEX(cache_ptr, pf_entry_ptr, FAIL); /* If dirty, insert the entry into the slist. */ if (pf_entry_ptr->is_dirty) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, pf_entry_ptr, FAIL) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, pf_entry_ptr, FAIL); /* Append the entry to the LRU */ - H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, pf_entry_ptr, FAIL) + H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, pf_entry_ptr, FAIL); - H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty) + H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty); /* If the prefetched entry is the child in one or more flush * dependency relationships, recreate those flush dependencies. @@ -3024,19 +2432,18 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) /* Find the parent entry */ parent_ptr = NULL; - H5C__SEARCH_INDEX(cache_ptr, pf_entry_ptr->fd_parent_addrs[v], parent_ptr, FAIL) + H5C__SEARCH_INDEX(cache_ptr, pf_entry_ptr->fd_parent_addrs[v], parent_ptr, FAIL); if (parent_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "fd parent not in cache?!?") /* Sanity checks */ - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(parent_ptr->addr == pf_entry_ptr->fd_parent_addrs[v]); assert(parent_ptr->lru_rank == -1); /* Must protect parent entry to set up a flush dependency. * Do this now, and then unprotect when done. 
*/ - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL) + H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL); parent_ptr->is_protected = TRUE; /* Setup the flush dependency */ @@ -3044,7 +2451,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency") /* And now unprotect */ - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, parent_ptr, FAIL) + H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, parent_ptr, FAIL); parent_ptr->is_protected = FALSE; } /* end for */ } /* end for */ @@ -3055,7 +2462,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) */ pf_entry_ptr = cache_ptr->il_head; while (pf_entry_ptr != NULL) { - assert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert((pf_entry_ptr->prefetched && pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) || (!pf_entry_ptr->prefetched && pf_entry_ptr->type != H5AC_PREFETCHED_ENTRY)); if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) @@ -3064,7 +2470,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) { parent_ptr = pf_entry_ptr->flush_dep_parent[v]; assert(parent_ptr); - assert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(pf_entry_ptr->fd_parent_addrs); assert(pf_entry_ptr->fd_parent_addrs[v] == parent_ptr->addr); assert(parent_ptr->flush_dep_nchildren > 0); @@ -3089,7 +2494,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) i = -1; entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); assert(entry_ptr->type != NULL); if (entry_ptr->prefetched) { @@ -3111,8 +2515,8 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) * we add code to store and restore adaptive resize status. */ assert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS); - } /* end block */ -#endif /* NDEBUG */ + } /* end block */ +#endif /* Check to see if the cache is oversize, and evict entries as * necessary to remain within limits. 
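The debug block in the hunk above checks an invariant of the reconstructed LRU: the lru_rank values restored for prefetched entries must increase from the head of the list toward the tail, and any holes in the rank sequence can only have been left by epoch markers that were active when the image was written (markers are not restored, so their ranks simply go missing). A minimal, self-contained sketch of that check -- hypothetical names, with the marker limit assumed here to be 10, the value H5C__MAX_EPOCH_MARKERS is defined to -- might read:

    #include <assert.h>

    /* Illustrative sketch only -- not library code. ranks[] holds the
     * lru_rank of each LRU entry from head to tail; -1 marks entries
     * that were not assigned a rank in the image.
     */
    #define MAX_EPOCH_MARKERS 10 /* assumed marker limit */

    static void
    check_lru_ranks(const int ranks[], int n)
    {
        int holes = 0; /* rank numbers skipped by missing epoch markers */
        int prev  = 0; /* rank numbering starts at 1 */

        for (int i = 0; i < n; i++) {
            if (ranks[i] < 0)
                continue; /* entry was not ranked in the image */

            assert(ranks[i] > prev);      /* ranks strictly increase */
            holes += ranks[i] - prev - 1; /* gap left by a marker    */
            prev = ranks[i];
        }

        assert(holes <= MAX_EPOCH_MARKERS);
    }

This mirrors the assert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS) above; the surrounding comment notes the check would need revisiting if adaptive resize status were ever stored in and restored from the image.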
@@ -3160,7 +2564,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b hbool_t in_lru = FALSE; hbool_t is_fd_parent = FALSE; hbool_t is_fd_child = FALSE; -#endif /* NDEBUG */ /* only used in assertions */ +#endif const uint8_t *p; hbool_t file_is_rw; H5C_cache_entry_t *ret_value = NULL; /* Return value */ @@ -3169,7 +2573,6 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b /* Sanity checks */ assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->num_entries_in_image > 0); assert(buf && *buf); @@ -3197,7 +2600,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b is_fd_parent = TRUE; if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) is_fd_child = TRUE; -#endif /* NDEBUG */ /* only used in assertions */ +#endif /* Force dirty entries to clean if the file is read only -- must do * this as otherwise the cache will attempt to write them on file @@ -3243,7 +2646,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b assert((in_lru && pf_entry_ptr->lru_rank >= 0) || (!in_lru && pf_entry_ptr->lru_rank == -1)); /* Decode entry offset */ - H5_addr_decode(f, &p, &pf_entry_ptr->addr); + H5F_addr_decode(f, &p, &pf_entry_ptr->addr); if (!H5_addr_defined(pf_entry_ptr->addr)) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry offset") @@ -3267,7 +2670,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for fd parent addrs buffer") for (u = 0; u < pf_entry_ptr->fd_parent_count; u++) { - H5_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u])); + H5F_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u])); if (!H5_addr_defined(pf_entry_ptr->fd_parent_addrs[u])) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid flush dependency parent offset") } /* end for */ @@ -3286,7 +2689,6 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b /* Initialize the rest of the fields in the prefetched entry */ /* (Only need to set non-zero/NULL/FALSE fields, due to calloc() above) */ - pf_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; pf_entry_ptr->cache_ptr = cache_ptr; pf_entry_ptr->image_up_to_date = TRUE; pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY; @@ -3341,7 +2743,6 @@ H5C__write_cache_image_superblock_msg(H5F_t *f, hbool_t create) assert(f->shared->cache); cache_ptr = f->shared->cache; assert(cache_ptr); - assert(cache_ptr->magic == H5C__H5C_T_MAGIC); assert(cache_ptr->close_warning_received); /* Write data into the metadata cache image superblock extension message. 
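The hunks above also switch the decode path from H5_addr_decode to H5F_addr_decode when reading the entry offset and the flush dependency parent offsets back out of the image buffer. For orientation: HDF5 file addresses are stored as fixed-width little-endian byte strings, and a field of all 0xff bytes denotes an undefined address. A self-contained sketch of that style of decode -- simplified stand-in types, no library headers, and assuming the field is no wider than the in-memory address type -- might look like:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the library's types, for illustration only */
    typedef uint64_t haddr_t;
    #define HADDR_UNDEF ((haddr_t)-1)

    /* Decode sizeof_addr little-endian bytes from *pp into an address,
     * advancing *pp past the field; an all-0xff field decodes to
     * HADDR_UNDEF. Assumes sizeof_addr <= sizeof(haddr_t).
     */
    static haddr_t
    decode_addr(const uint8_t **pp, size_t sizeof_addr)
    {
        haddr_t addr     = 0;
        int     all_ones = 1;

        for (size_t u = 0; u < sizeof_addr; u++) {
            uint8_t c = *(*pp)++;

            if (c != 0xff)
                all_ones = 0;
            addr |= (haddr_t)c << (8 * u);
        }

        return all_ones ? HADDR_UNDEF : addr;
    }

The !H5_addr_defined(...) checks that follow each decode in the hunk reject exactly this undefined-address case.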
@@ -3353,8 +2754,7 @@ H5C__write_cache_image_superblock_msg(H5F_t *f, hbool_t create) if (cache_ptr->aux_ptr) { /* we have multiple processes */ H5AC_aux_t *aux_ptr; - aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; - assert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); + aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; mdci_msg.size = aux_ptr->p0_image_len; } /* end if */ else @@ -3398,8 +2798,7 @@ H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr) { H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; - if ((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) { - assert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); + if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) { #endif /* H5_HAVE_PARALLEL */ /* Write the buffer (if serial access, or rank 0 for parallel access) */ diff --git a/src/H5Cint.c b/src/H5Cint.c new file mode 100644 index 0000000..a2a4dcf --- /dev/null +++ b/src/H5Cint.c @@ -0,0 +1,2508 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5Cint.c + * + * Purpose: Internal routines for the cache. + * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Cmodule.h" /* This source code file is part of the H5C module */ +#define H5F_FRIEND /* suppress error about including H5Fpkg */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Cpkg.h" /* Cache */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fpkg.h" /* Files */ +#include "H5MFprivate.h" /* File memory management */ + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, + size_t *new_max_cache_size_ptr, hbool_t write_permitted); +static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr); +static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted); +static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr); +static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); +static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring); + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/*------------------------------------------------------------------------- + * Function: H5C__auto_adjust_cache_size + * + * Purpose: Obtain the current full cache hit rate, and compare it + * with the hit rate thresholds for modifying cache size. 
+ * If one of the thresholds has been crossed, adjusts the + * size of the cache accordingly. + * + * The function then resets the full cache hit rate + * statistics, and exits. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + hbool_t reentrant_call = FALSE; + hbool_t inserted_epoch_marker = FALSE; + size_t new_max_cache_size = 0; + size_t old_max_cache_size = 0; + size_t new_min_clean_size = 0; + size_t old_min_clean_size = 0; + double hit_rate; + enum H5C_resize_status status = in_spec; /* will change if needed */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(f); + assert(cache_ptr); + assert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length); + assert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction); + assert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0); + + /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this + * is a re-entrant call via a client callback called in the resize + * process. To avoid an infinite recursion, set reentrant_call to + * TRUE, and goto done. + */ + if (cache_ptr->resize_in_progress) { + reentrant_call = TRUE; + HGOTO_DONE(SUCCEED); + } /* end if */ + + cache_ptr->resize_in_progress = TRUE; + + if (!cache_ptr->resize_enabled) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled") + + assert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || + (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)); + + if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") + + assert((0.0 <= hit_rate) && (hit_rate <= 1.0)); + + switch (cache_ptr->resize_ctl.incr_mode) { + case H5C_incr__off: + if (cache_ptr->size_increase_possible) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?") + break; + + case H5C_incr__threshold: + if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) { + if (!cache_ptr->size_increase_possible) + status = increase_disabled; + else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) { + assert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size); + status = at_max_size; + } + else if (!cache_ptr->cache_full) + status = not_full; + else { + new_max_cache_size = + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment); + + /* clip to max size if necessary */ + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + + /* clip to max increment if necessary */ + if (cache_ptr->resize_ctl.apply_max_increment && + ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) < + new_max_cache_size)) + new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment; + + status = increase; + } + } + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode") + } + + /* If the decr_mode is either age out or age out with threshold, we + * must run the marker maintenance code, whether we run the size + * reduction code or not. We do this in two places -- here we + * insert a new marker if the number of active epoch markers + * is less than the current epochs before eviction, and after + * the ageout call, we cycle the markers. 
+ * + * However, we can't call the ageout code or cycle the markers + * unless there was a full complement of markers in place on + * entry. The inserted_epoch_marker flag is used to track this. + */ + + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) { + + if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker") + + inserted_epoch_marker = TRUE; + } + + /* don't run the cache size decrease code unless the cache size + * increase code is disabled, or the size increase code sees no need + * for action. In either case, status == in_spec at this point. + */ + + if (status == in_spec) { + switch (cache_ptr->resize_ctl.decr_mode) { + case H5C_decr__off: + break; + + case H5C_decr__threshold: + if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) { + if (!cache_ptr->size_decrease_possible) + status = decrease_disabled; + else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) { + assert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size); + status = at_min_size; + } + else { + new_max_cache_size = + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement); + + /* clip to min size if necessary */ + if (new_max_cache_size < cache_ptr->resize_ctl.min_size) + new_max_cache_size = cache_ptr->resize_ctl.min_size; + + /* clip to max decrement if necessary */ + if (cache_ptr->resize_ctl.apply_max_decrement && + ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) < + cache_ptr->max_cache_size)) + new_max_cache_size = + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; + + status = decrease; + } + } + break; + + case H5C_decr__age_out_with_threshold: + case H5C_decr__age_out: + if (!inserted_epoch_marker) { + if (!cache_ptr->size_decrease_possible) + status = decrease_disabled; + else { + if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size, + write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed") + } /* end else */ + } /* end if */ + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown decr_mode") + } + } + + /* cycle the epoch markers here if appropriate */ + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + !inserted_epoch_marker) + /* move last epoch marker to the head of the LRU list */ + if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker") + + if ((status == increase) || (status == decrease)) { + old_max_cache_size = cache_ptr->max_cache_size; + old_min_clean_size = cache_ptr->min_clean_size; + + new_min_clean_size = + (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); + + /* new_min_clean_size is of size_t, and thus must be non-negative. + * Hence we have + * + * ( 0 <= new_min_clean_size ). + * + * by definition. 
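+ *
+ * For example, with new_max_cache_size equal to 8 MiB and a
+ * min_clean_fraction of 0.5, new_min_clean_size works out to
+ * 4 MiB, which trivially satisfies the assertions below.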
+ */ + assert(new_min_clean_size <= new_max_cache_size); + assert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); + assert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); + + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; + + if (status == increase) + cache_ptr->cache_full = FALSE; + else if (status == decrease) + cache_ptr->size_decreased = TRUE; + + /* update flash cache size increase fields as appropriate */ + if (cache_ptr->flash_size_increase_possible) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_threshold = + (size_t)(((double)(cache_ptr->max_cache_size)) * + (cache_ptr->resize_ctl.flash_threshold)); + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + } + } + + if (cache_ptr->resize_ctl.rpt_fcn != NULL) + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); + + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") + +done: + /* Sanity checks */ + assert(cache_ptr->resize_in_progress); + if (!reentrant_call) + cache_ptr->resize_in_progress = FALSE; + assert((!reentrant_call) || (cache_ptr->resize_in_progress)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__auto_adjust_cache_size() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout + * + * Purpose: Implement the ageout automatic cache size decrement + * algorithm. Note that while this code evicts aged out + * entries, the code does not change the maximum cache size. + * Instead, the function simply computes the new value (if + * any change is indicated) and reports this value in + * *new_max_cache_size_ptr. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, + size_t *new_max_cache_size_ptr, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + size_t test_size; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(f); + assert(cache_ptr); + assert((status_ptr) && (*status_ptr == in_spec)); + assert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); + + /* remove excess epoch markers if any */ + if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) + if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") + + if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) && + (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) { + + if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) { + /* evict aged out cache entries if appropriate... 
*/ + if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries") + + /* ... and then reduce cache size if appropriate */ + if (cache_ptr->index_size < cache_ptr->max_cache_size) { + if (cache_ptr->resize_ctl.apply_empty_reserve) { + test_size = + (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve)); + if (test_size < cache_ptr->max_cache_size) { + *status_ptr = decrease; + *new_max_cache_size_ptr = test_size; + } + } + else { + *status_ptr = decrease; + *new_max_cache_size_ptr = cache_ptr->index_size; + } + + if (*status_ptr == decrease) { + /* clip to min size if necessary */ + if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size) + *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size; + + /* clip to max decrement if necessary */ + if ((cache_ptr->resize_ctl.apply_max_decrement) && + ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) < + cache_ptr->max_cache_size)) + *new_max_cache_size_ptr = + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; + } + } + } + else + *status_ptr = at_min_size; + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__cycle_epoch_marker + * + * Purpose: Remove the oldest epoch marker from the LRU list, + * and reinsert it at the head of the LRU list. Also + * remove the epoch marker's index from the head of the + * ring buffer, and re-insert it at the tail of the ring + * buffer. + * + * Return: SUCCEED on success/FAIL on failure. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) +{ + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + if (cache_ptr->epoch_markers_active <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") + + /* remove the last marker from both the ring buffer and the LRU list */ + i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + + cache_ptr->epoch_marker_ringbuf_size -= 1; + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, + (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) + + /* now, re-insert it at the head of the LRU list, and at the tail of + * the ring buffer. 
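+ *
+ * (The ring buffer has H5C__MAX_EPOCH_MARKERS + 1 slots, and the
+ * first/last indices advance modulo that slot count, so the buffer
+ * wraps around rather than shifting its contents.)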
+ */ + assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + assert(cache_ptr->epoch_markers[i].next == NULL); + assert(cache_ptr->epoch_markers[i].prev == NULL); + + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i; + if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") + + cache_ptr->epoch_marker_ringbuf_size += 1; + + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C__autoadjust__ageout__cycle_epoch_marker() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__evict_aged_out_entries + * + * Purpose: Evict clean entries in the cache that haven't + * been accessed for at least + * cache_ptr->resize_ctl.epochs_before_eviction epochs, + * and flush dirty entries that haven't been accessed for + * that amount of time. + * + * Depending on configuration, the function will either + * flush or evict all such entries, or all such entries it + * encounters until it has freed the maximum amount of space + * allowed under the maximum decrement. + * + * If we are running in parallel mode, writes may not be + * permitted. If so, the function simply skips any dirty + * entries it may encounter. + * + * The function makes no attempt to maintain the minimum + * clean size, as there is no guarantee that the cache size + * will be changed. + * + * If there is no cache size change, the minimum clean size + * constraint will be met through a combination of clean + * entries and free space in the cache. + * + * If there is a cache size reduction, the minimum clean size + * will be re-calculated, and will be enforced the next time + * we have to make space in the cache. + * + * Return: Non-negative on success/Negative on failure. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + size_t eviction_size_limit; + size_t bytes_evicted = 0; + hbool_t prev_is_dirty = FALSE; + hbool_t restart_scan; + H5C_cache_entry_t *entry_ptr; + H5C_cache_entry_t *next_ptr; + H5C_cache_entry_t *prev_ptr; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(f); + assert(cache_ptr); + + /* if there is a limit on the amount that the cache size can be decreased + * in any one round of the cache size reduction algorithm, load that + * limit into eviction_size_limit. Otherwise, set eviction_size_limit + * to the equivalent of infinity. The current size of the index will + * do nicely. + */ + if (cache_ptr->resize_ctl.apply_max_decrement) + eviction_size_limit = cache_ptr->resize_ctl.max_decrement; + else + eviction_size_limit = cache_ptr->index_size; /* i.e. 
infinity */ + + if (write_permitted) { + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID && + bytes_evicted < eviction_size_limit) { + hbool_t skipping_entry = FALSE; + + assert(!(entry_ptr->is_protected)); + assert(!(entry_ptr->is_read_only)); + assert((entry_ptr->ro_ref_count) == 0); + + next_ptr = entry_ptr->next; + prev_ptr = entry_ptr->prev; + + if (prev_ptr != NULL) + prev_is_dirty = prev_ptr->is_dirty; + + if (entry_ptr->is_dirty) { + assert(!entry_ptr->prefetched_dirty); + + /* dirty corked entry is skipped */ + if (entry_ptr->tag_info && entry_ptr->tag_info->corked) + skipping_entry = TRUE; + else { + /* reset entries_removed_counter and + * last_entry_removed_ptr prior to the call to + * H5C__flush_single_entry() so that we can spot + * unexpected removals of entries from the cache, + * and set the restart_scan flag if proceeding + * would be likely to cause us to scan an entry + * that is no longer in the cache. + */ + cache_ptr->entries_removed_counter = 0; + cache_ptr->last_entry_removed_ptr = NULL; + + if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + + if (cache_ptr->entries_removed_counter > 1 || + cache_ptr->last_entry_removed_ptr == prev_ptr) + restart_scan = TRUE; + } /* end else */ + } /* end if */ + else if (!entry_ptr->prefetched_dirty) { + bytes_evicted += entry_ptr->size; + + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } /* end else-if */ + else { + assert(!entry_ptr->is_dirty); + assert(entry_ptr->prefetched_dirty); + + skipping_entry = TRUE; + } /* end else */ + + if (prev_ptr != NULL) { + if (skipping_entry) + entry_ptr = prev_ptr; + else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) || + (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) { + /* Something has happened to the LRU -- start over + * from the tail. + */ + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + + H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr); + } /* end else-if */ + else + entry_ptr = prev_ptr; + } /* end if */ + else + entry_ptr = NULL; + } /* end while */ + + /* for now at least, don't bother to maintain the minimum clean size, + * as the cache should now be less than its maximum size. Due to + * the vagaries of the cache size reduction algorithm, we may not + * reduce the size of the cache. + * + * If we do, we will calculate a new minimum clean size, which will + * be enforced the next time we try to make space in the cache. + * + * If we don't, no action is necessary, as we have just evicted and/or + * flushed a bunch of entries and therefore the sum of the clean + * and free space in the cache must be greater than or equal to the + * min clean space requirement (assuming that requirement was met on + * entry). + */ + } /* end if */ + else /* ! write_permitted */ { + /* Since we are not allowed to write, all we can do is evict + * any clean entries that we may encounter before we either + * hit the eviction size limit, or encounter the epoch marker. + * + * If we are operating read only, this isn't an issue, as there + * will not be any dirty entries. 
+ * + * If we are operating in R/W mode, all the dirty entries we + * skip will be flushed the next time we attempt to make space + * when writes are permitted. This may have some local + * performance implications, but it shouldn't cause any net + * slowdown. + */ + assert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); + entry_ptr = cache_ptr->LRU_tail_ptr; + while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && + (bytes_evicted < eviction_size_limit)) { + assert(!(entry_ptr->is_protected)); + + prev_ptr = entry_ptr->prev; + + if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty)) + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry") + + /* just skip the entry if it is dirty, as we can't do + * anything with it now since we can't write. + * + * Since all entries are clean, serialize() will not be called, + * and thus we needn't test to see if the LRU has been changed + * out from under us. + */ + entry_ptr = prev_ptr; + } /* end while */ + } /* end else */ + + if (cache_ptr->index_size < cache_ptr->max_cache_size) + cache_ptr->cache_full = FALSE; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__evict_aged_out_entries() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__insert_new_marker + * + * Purpose: Find an unused marker cache entry, mark it as used, and + * insert it at the head of the LRU list. Also add the + * marker's index to the ring buffer. + * + * Return: SUCCEED on success/FAIL on failure. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) +{ + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers") + + /* find an unused marker (check the index bound before reading the + * active flag, so we never read past the end of the array) + */ + i = 0; + while (i < H5C__MAX_EPOCH_MARKERS && (cache_ptr->epoch_marker_active)[i]) + i++; + if (i >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker") + + assert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); + assert(((cache_ptr->epoch_markers)[i]).next == NULL); + assert(((cache_ptr->epoch_markers)[i]).prev == NULL); + + (cache_ptr->epoch_marker_active)[i] = TRUE; + + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; + if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") + + cache_ptr->epoch_marker_ringbuf_size += 1; + + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + cache_ptr->epoch_markers_active += 1; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__insert_new_marker() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__remove_all_markers + * + * Purpose: Remove all epoch markers from the LRU list and mark them + * as inactive. + * + * Return: SUCCEED on success/FAIL on failure. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) +{ + int ring_buf_index; + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + while (cache_ptr->epoch_markers_active > 0) { + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. + */ + + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + /* mark the epoch marker as unused. */ + cache_ptr->epoch_marker_active[i] = FALSE; + + assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + assert(cache_ptr->epoch_markers[i].next == NULL); + assert(cache_ptr->epoch_markers[i].prev == NULL); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + assert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__remove_all_markers() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__remove_excess_markers + * + * Purpose: Remove epoch markers from the end of the LRU list and + * mark them as inactive until the number of active markers + * equals the current value of + * cache_ptr->resize_ctl.epochs_before_eviction. + * + * Return: SUCCEED on success/FAIL on failure. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) +{ + int ring_buf_index; + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + + if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") + + while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) { + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. + */ + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + /* mark the epoch marker as unused. 
*/ + cache_ptr->epoch_marker_active[i] = FALSE; + + assert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + assert(cache_ptr->epoch_markers[i].next == NULL); + assert(cache_ptr->epoch_markers[i].prev == NULL); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + assert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__remove_excess_markers() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flash_increase_cache_size + * + * Purpose: If there is not at least new_entry_size - old_entry_size + * bytes of free space in the cache and the current + * max_cache_size is less than cache_ptr->resize_ctl.max_size, + * perform a flash increase in the cache size and then reset + * the full cache hit rate statistics, and exit. + * + * Return: Non-negative on success/Negative on failure. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size) +{ + size_t new_max_cache_size = 0; + size_t old_max_cache_size = 0; + size_t new_min_clean_size = 0; + size_t old_min_clean_size = 0; + size_t space_needed; + enum H5C_resize_status status = flash_increase; /* may change */ + double hit_rate; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + assert(cache_ptr->flash_size_increase_possible); + assert(new_entry_size > cache_ptr->flash_size_increase_threshold); + assert(old_entry_size < new_entry_size); + + if (old_entry_size >= new_entry_size) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size") + + space_needed = new_entry_size - old_entry_size; + if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && + (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + if (cache_ptr->index_size < cache_ptr->max_cache_size) { + assert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed); + space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size; + } + space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple); + new_max_cache_size = cache_ptr->max_cache_size + space_needed; + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + assert(new_max_cache_size > cache_ptr->max_cache_size); + + new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction); + assert(new_min_clean_size <= new_max_cache_size); + + old_max_cache_size = cache_ptr->max_cache_size; + old_min_clean_size = cache_ptr->min_clean_size; + + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; + + /* update flash cache size increase fields as appropriate */ + assert(cache_ptr->flash_size_increase_possible); + + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but 
H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_threshold = + (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold); + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + + /* note that we don't cycle the epoch markers. We can + * argue either way as to whether we should, but for now + * we don't. + */ + + if (cache_ptr->resize_ctl.rpt_fcn != NULL) { + /* get the hit rate for the reporting function. Should still + * be good as we haven't reset the hit rate statistics. + */ + if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") + + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); + } + + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flash_increase_cache_size() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_invalidate_cache + * + * Purpose: Flush and destroy the entries contained in the target + * cache. + * + * If the cache contains protected entries, the function will + * fail, as protected entries cannot be either flushed or + * destroyed. However all unprotected entries should be + * flushed and destroyed before the function returns failure. + * + * While pinned entries can usually be flushed, they cannot + * be destroyed. However, they should be unpinned when all + * the entries that reference them have been destroyed (thus + * reducing the pinned entry's reference count to 0, allowing + * it to be unpinned). + * + * If pinned entries are present, the function makes repeated + * passes through the cache, flushing all dirty entries + * (including the pinned dirty entries where permitted) and + * destroying all unpinned entries. This process is repeated + * until either the cache is empty, or the number of pinned + * entries stops decreasing on each pass. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) +{ + H5C_t *cache_ptr; + H5C_ring_t ring; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(cache_ptr->slist_ptr); + assert(cache_ptr->slist_enabled); + +#ifdef H5C_DO_SANITY_CHECKS + { + int32_t i; + uint32_t index_len = 0; + uint32_t slist_len = 0; + size_t index_size = (size_t)0; + size_t clean_index_size = (size_t)0; + size_t dirty_index_size = (size_t)0; + size_t slist_size = (size_t)0; + + assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); + assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); + assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + + for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { + index_len += cache_ptr->index_ring_len[i]; + index_size += cache_ptr->index_ring_size[i]; + clean_index_size += cache_ptr->clean_index_ring_size[i]; + dirty_index_size += cache_ptr->dirty_index_ring_size[i]; + + slist_len += cache_ptr->slist_ring_len[i]; + slist_size += cache_ptr->slist_ring_size[i]; + } /* end for */ + + assert(cache_ptr->index_len == index_len); + assert(cache_ptr->index_size == index_size); + assert(cache_ptr->clean_index_size == clean_index_size); + assert(cache_ptr->dirty_index_size == dirty_index_size); + assert(cache_ptr->slist_len == slist_len); + assert(cache_ptr->slist_size == slist_size); + } +#endif /* H5C_DO_SANITY_CHECKS */ + + /* remove ageout markers if present */ + if (cache_ptr->epoch_markers_active > 0) + if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") + + /* flush invalidate each ring, starting from the outermost ring and + * working inward. 
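+ *
+ * (The loop below starts at H5C_RING_USER and advances through the
+ * higher-numbered rings, finishing with the superblock ring.)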
+ */ + ring = H5C_RING_USER; + while (ring < H5C_RING_NTYPES) { + if (H5C__flush_invalidate_ring(f, ring, flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed") + ring++; + } /* end while */ + +#ifndef NDEBUG + /* Invariants, after destroying all entries in the hash table */ + if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) { + assert(cache_ptr->index_size == 0); + assert(cache_ptr->clean_index_size == 0); + assert(cache_ptr->pel_len == 0); + assert(cache_ptr->pel_size == 0); + } /* end if */ + else { + H5C_cache_entry_t *entry_ptr; /* Cache entry */ + unsigned u; /* Local index variable */ + + /* All rings except ring 4 should be empty now */ + /* (Ring 4 has the superblock) */ + for (u = H5C_RING_USER; u < H5C_RING_SB; u++) { + assert(cache_ptr->index_ring_len[u] == 0); + assert(cache_ptr->index_ring_size[u] == 0); + assert(cache_ptr->clean_index_ring_size[u] == 0); + } /* end for */ + + /* Check that any remaining pinned entries are in the superblock ring */ + entry_ptr = cache_ptr->pel_head_ptr; + while (entry_ptr) { + /* Check ring */ + assert(entry_ptr->ring == H5C_RING_SB); + + /* Advance to next entry in pinned entry list */ + entry_ptr = entry_ptr->next; + } /* end while */ + } /* end else */ + + assert(cache_ptr->dirty_index_size == 0); + assert(cache_ptr->slist_len == 0); + assert(cache_ptr->slist_size == 0); + assert(cache_ptr->pl_len == 0); + assert(cache_ptr->pl_size == 0); + assert(cache_ptr->LRU_list_len == 0); + assert(cache_ptr->LRU_list_size == 0); +#endif + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_invalidate_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_invalidate_ring + * + * Purpose: Flush and destroy the entries contained in the target + * cache and ring. + * + * If the ring contains protected entries, the function will + * fail, as protected entries cannot be either flushed or + * destroyed. However all unprotected entries should be + * flushed and destroyed before the function returns failure. + * + * While pinned entries can usually be flushed, they cannot + * be destroyed. However, they should be unpinned when all + * the entries that reference them have been destroyed (thus + * reducing the pinned entry's reference count to 0, allowing + * it to be unpinned). + * + * If pinned entries are present, the function makes repeated + * passes through the cache, flushing all dirty entries + * (including the pinned dirty entries where permitted) and + * destroying all unpinned entries. This process is repeated + * until either the cache is empty, or the number of pinned + * entries stops decreasing on each pass. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the cache flushing + * entries in flush dependency order. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
+ * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) +{ + H5C_t *cache_ptr; + hbool_t restart_slist_scan; + uint32_t protected_entries = 0; + int32_t i; + uint32_t cur_ring_pel_len; + uint32_t old_ring_pel_len; + unsigned cooked_flags; + unsigned evict_flags; + H5SL_node_t *node_ptr = NULL; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *next_entry_ptr = NULL; +#ifdef H5C_DO_SANITY_CHECKS + uint32_t initial_slist_len = 0; + size_t initial_slist_size = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(cache_ptr->slist_enabled); + assert(cache_ptr->slist_ptr); + assert(ring > H5C_RING_UNDEFINED); + assert(ring < H5C_RING_NTYPES); + + assert(cache_ptr->epoch_markers_active == 0); + + /* Filter out the flags that are not relevant to the flush/invalidate. + */ + cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; + evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG; + + /* The flush procedure here is a bit strange. + * + * In the outer while loop we make at least one pass through the + * cache, and then repeat until either all the pinned entries in + * the ring unpin themselves, or until the number of pinned entries + * in the ring stops declining. In this latter case, we scream and die. + * + * Since the fractal heap can dirty, resize, and/or move entries + * in its flush callback, it is possible that the cache will still + * contain dirty entries at this point. If so, we must make more + * passes through the skip list to allow it to empty. + * + * Further, since clean entries can be dirtied, resized, and/or moved + * as the result of a flush call back (either the entry's own, or that + * of some other cache entry), we can no longer promise to flush + * the cache entries in increasing address order. + * + * Instead, we make a pass through + * the skip list, and then a pass through the "clean" entries, and + * then repeat as needed. Thus it is quite possible that an + * entry will be evicted from the cache only to be re-loaded later + * in the flush process. + * + * The bottom line is that entries will probably be flushed in close + * to increasing address order, but there are no guarantees. + */ + + /* compute the number of pinned entries in this ring */ + entry_ptr = cache_ptr->pel_head_ptr; + cur_ring_pel_len = 0; + while (entry_ptr != NULL) { + assert(entry_ptr->ring >= ring); + if (entry_ptr->ring == ring) + cur_ring_pel_len++; + + entry_ptr = entry_ptr->next; + } /* end while */ + old_ring_pel_len = cur_ring_pel_len; + + while (cache_ptr->index_ring_len[ring] > 0) { + /* first, try to flush-destroy any dirty entries. Do this by + * making a scan through the slist. Note that new dirty entries + * may be created by the flush call backs. Thus it is possible + * that the slist will not be empty after we finish the scan. + */ + +#ifdef H5C_DO_SANITY_CHECKS + /* Depending on circumstances, H5C__flush_single_entry() will + * remove dirty entries from the slist as it flushes them. + * Thus for sanity checks we must make note of the initial + * slist length and size before we do any flushes. 
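+ *
+ * (If the scan below runs to completion without a restart, the
+ * sanity checks after it expect slist_len to equal
+ * initial_slist_len + slist_len_increase, and similarly for the
+ * corresponding sizes.)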
+ */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; + + /* There is also the possibility that entries will be + * dirtied, resized, moved, and/or removed from the cache + * as the result of calls to the flush callbacks. We use + * the slist_len_increase and slist_size_increase + * fields in struct H5C_t to track these changes for purposes + * of sanity checking. + * + * To this end, we must zero these fields before we start + * the pass through the slist. + */ + cache_ptr->slist_len_increase = 0; + cache_ptr->slist_size_increase = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Set the cache_ptr->slist_changed to false. + * + * This flag is set to TRUE by H5C__flush_single_entry if the slist + * is modified by a pre_serialize, serialize, or notify callback. + * + * H5C__flush_invalidate_ring() uses this flag to detect any + * modifications to the slist that might corrupt the scan of + * the slist -- and restart the scan in this event. + */ + cache_ptr->slist_changed = FALSE; + + /* this done, start the scan of the slist */ + restart_slist_scan = TRUE; + while (restart_slist_scan || (node_ptr != NULL)) { + if (restart_slist_scan) { + restart_slist_scan = FALSE; + + /* Start at beginning of skip list */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + if (node_ptr == NULL) + /* the slist is empty -- break out of inner loop */ + break; + + /* Get cache entry for this node */ + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + assert(next_entry_ptr->is_dirty); + assert(next_entry_ptr->in_slist); + assert(next_entry_ptr->ring >= ring); + } /* end if */ + + entry_ptr = next_entry_ptr; + + /* It is possible that entries will be dirtied, resized, + * flushed, or removed from the cache via the take ownership + * flag as the result of pre_serialize or serialize callbacks. + * + * This in turn can corrupt the scan through the slist. + * + * We test for slist modifications in the pre_serialize + * and serialize callbacks, and restart the scan of the + * slist if we find them. However, it is best to do some extra + * sanity checking just in case. + */ + assert(entry_ptr != NULL); + assert(entry_ptr->in_slist); + assert(entry_ptr->is_dirty); + assert(entry_ptr->ring >= ring); + + /* increment node pointer now, before we delete its target + * from the slist. + */ + node_ptr = H5SL_next(node_ptr); + if (node_ptr != NULL) { + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + assert(next_entry_ptr->is_dirty); + assert(next_entry_ptr->in_slist); + assert(next_entry_ptr->ring >= ring); + assert(entry_ptr != next_entry_ptr); + } /* end if */ + else + next_entry_ptr = NULL; + + /* Note that we now remove nodes from the slist as we flush + * the associated entries, instead of leaving them there + * until we are done, and then destroying all nodes in + * the slist. + * + * While this optimization used to be easy, with the possibility + * of new entries being added to the slist in the midst of the + * flush, we must keep the slist in canonical form at all + * times. 
+ */ + if (((!entry_ptr->flush_me_last) || + ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && + (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { + if (entry_ptr->is_protected) { + /* We have major problems -- but let's flush + * everything we can before we flag an error. + */ + protected_entries++; + } /* end if */ + else if (entry_ptr->is_pinned) { + if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); + } /* end if */ + } /* end else-if */ + else { + if (H5C__flush_single_entry(f, entry_ptr, + (cooked_flags | H5C__DURING_FLUSH_FLAG | + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); + } /* end if */ + } /* end else */ + } /* end if */ + } /* end while loop scanning skip list */ + +#ifdef H5C_DO_SANITY_CHECKS + /* It is possible that entries were added to the slist during + * the scan, either before or after the scan pointer. The following + * asserts take this into account. + * + * Don't bother with the sanity checks if node_ptr != NULL, as + * in this case we broke out of the loop because it got changed + * out from under us. + */ + + if (node_ptr == NULL) { + assert(cache_ptr->slist_len == + (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase)); + assert(cache_ptr->slist_size == + (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase)); + } /* end if */ +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Since we are doing a destroy, we must make a pass through + * the hash table and try to flush - destroy all entries that + * remain. + * + * It used to be that all entries remaining in the cache at + * this point had to be clean, but with the fractal heap mods + * this may not be the case. If so, we will flush entries out + * in increasing address order. + * + * Writes to disk are possible here. + */ + + /* Reset the counters so that we can detect insertions, loads, + * and moves caused by the pre_serialize and serialize calls. 
+ */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + next_entry_ptr = cache_ptr->il_head; + while (next_entry_ptr != NULL) { + entry_ptr = next_entry_ptr; + assert(entry_ptr->ring >= ring); + + next_entry_ptr = entry_ptr->il_next; + + if (((!entry_ptr->flush_me_last) || + (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && + (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { + + if (entry_ptr->is_protected) { + /* We have major problems -- but let's flush and + * destroy everything we can before we flag an + * error. + */ + protected_entries++; + + if (!entry_ptr->in_slist) + assert(!(entry_ptr->is_dirty)); + } /* end if */ + else if (!entry_ptr->is_pinned) { + /* If *entry_ptr is dirty, it is possible + * that one or more other entries may be + * either removed from the cache, loaded + * into the cache, or moved to a new location + * in the file as a side effect of the flush. + * + * It's also possible that removing a clean + * entry will remove the last child of a proxy + * entry, allowing it to be removed also and + * invalidating the next_entry_ptr. + * + * If either of these happens, and one of the target + * or proxy entries happens to be the next entry in + * the hash bucket, we could find ourselves + * either scanning a non-existent entry, scanning + * through a different bucket, or skipping an entry. + * + * None of these is good, so restart the + * scan at the head of the hash bucket + * after the flush if we detect that the next_entry_ptr + * becomes invalid. + * + * This is not as inefficient as it might seem, + * as hash buckets typically have at most two + * or three entries. + */ + cache_ptr->entry_watched_for_removal = next_entry_ptr; + if (H5C__flush_single_entry(f, entry_ptr, + (cooked_flags | H5C__DURING_FLUSH_FLAG | + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed") + + /* Restart the index list scan if necessary. Must + * do this if the next entry is evicted, and also if + * one or more entries are inserted, loaded, or moved + * as these operations can result in part of the scan + * being skipped -- which can cause a spurious failure + * if this results in the size of the pinned entry + * list failing to decline during the pass. + */ + if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) || + (cache_ptr->entries_loaded_counter > 0) || + (cache_ptr->entries_inserted_counter > 0) || + (cache_ptr->entries_relocated_counter > 0)) { + + next_entry_ptr = cache_ptr->il_head; + + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr); + } /* end if */ + else + cache_ptr->entry_watched_for_removal = NULL; + } /* end if */ + } /* end if */ + } /* end while loop scanning hash table */ + + /* We can't do anything if entries are pinned. The + * hope is that the entries will be unpinned as the + * result of destroys of entries that reference them. + * + * We detect this by noting the change in the number + * of pinned entries from pass to pass. If it stops + * shrinking before it hits zero, we scream and die.
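+ * + * For example (hypothetical counts, for illustration only): if one + * pass leaves cur_ring_pel_len at 5, the next at 3, and the pass + * after that still at 3, the count has stopped declining short of + * zero, so the check below gives up and flags an error rather than + * looping forever.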
+ */ + old_ring_pel_len = cur_ring_pel_len; + entry_ptr = cache_ptr->pel_head_ptr; + cur_ring_pel_len = 0; + + while (entry_ptr != NULL) { + assert(entry_ptr->ring >= ring); + + if (entry_ptr->ring == ring) + cur_ring_pel_len++; + + entry_ptr = entry_ptr->next; + } /* end while */ + + /* Check if the number of pinned entries in the ring is positive, and + * it is not declining. Scream and die if so. + */ + if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) { + /* Don't error if allowed to have pinned entries remaining */ + if (evict_flags) + HGOTO_DONE(TRUE); + + HGOTO_ERROR( + H5E_CACHE, H5E_CANTFLUSH, FAIL, + "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", + (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) + } /* end if */ + + assert(protected_entries == cache_ptr->pl_len); + + if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, + "Only protected entries left in cache, protected_entries = %d", + (int)protected_entries) + } /* main while loop */ + + /* Invariants, after destroying all entries in the ring */ + for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) { + assert(cache_ptr->index_ring_len[i] == 0); + assert(cache_ptr->index_ring_size[i] == (size_t)0); + assert(cache_ptr->clean_index_ring_size[i] == (size_t)0); + assert(cache_ptr->dirty_index_ring_size[i] == (size_t)0); + + assert(cache_ptr->slist_ring_len[i] == 0); + assert(cache_ptr->slist_ring_size[i] == (size_t)0); + } /* end for */ + + assert(protected_entries <= cache_ptr->pl_len); + + if (protected_entries > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries") + else if (cur_ring_pel_len > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_invalidate_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_ring + * + * Purpose: Flush the entries contained in the specified cache and + * ring. All entries in rings outside the specified ring + * must have been flushed on entry. + * + * If the cache contains protected entries in the specified + * ring, the function will fail, as protected entries cannot + * be flushed. However all unprotected entries in the target + * ring should be flushed before the function returns failure. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the slist flushing + * entries in flush dependency order. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
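+ * + * For illustration (hypothetical entries): if dirty entries A, B, + * and C are in the target ring with A the flush dependency parent + * of B and C, A fails the eligibility test on the first pass (it + * still has dirty children), B and C are flushed, and A becomes + * eligible on a subsequent pass -- hence the repeated passes.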
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) +{ + H5C_t *cache_ptr = f->shared->cache; + hbool_t flushed_entries_last_pass; + hbool_t flush_marked_entries; + hbool_t ignore_protected; + hbool_t tried_to_flush_protected_entry = FALSE; + hbool_t restart_slist_scan; + uint32_t protected_entries = 0; + H5SL_node_t *node_ptr = NULL; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *next_entry_ptr = NULL; +#ifdef H5C_DO_SANITY_CHECKS + uint32_t initial_slist_len = 0; + size_t initial_slist_size = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + int i; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(cache_ptr); + assert(cache_ptr->slist_enabled); + assert(cache_ptr->slist_ptr); + assert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0); + assert(ring > H5C_RING_UNDEFINED); + assert(ring < H5C_RING_NTYPES); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0); + flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0); + + if (!flush_marked_entries) + for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) + assert(cache_ptr->slist_ring_len[i] == 0); + + assert(cache_ptr->flush_in_progress); + + /* When we are only flushing marked entries, the slist will usually + * still contain entries when we have flushed everything we should. + * Thus we track whether we have flushed any entries in the last + * pass, and terminate if we haven't. + */ + flushed_entries_last_pass = TRUE; + + /* Set the cache_ptr->slist_changed to false. + * + * This flag is set to TRUE by H5C__flush_single_entry if the + * slist is modified by a pre_serialize, serialize, or notify callback. + * H5C_flush_cache uses this flag to detect any modifications + * to the slist that might corrupt the scan of the slist -- and + * restart the scan in this event. + */ + cache_ptr->slist_changed = FALSE; + + while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) { + flushed_entries_last_pass = FALSE; + +#ifdef H5C_DO_SANITY_CHECKS + /* For sanity checking, try to verify that the skip list has + * the expected size and number of entries at the end of each + * internal while loop (see below). + * + * Doing this gets a bit tricky, as depending on flags, we may + * or may not flush all the entries in the slist. + * + * To make things more entertaining, with the advent of the + * fractal heap, the entry serialize callback can cause entries + * to be dirtied, resized, and/or moved. Also, the + * pre_serialize callback can result in an entry being + * removed from the cache via the take ownership flag. + * + * To deal with this, we first make note of the initial + * skip list length and size: + */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; + + /* As mentioned above, there is the possibility that + * entries will be dirtied, resized, flushed, or removed + * from the cache via the take ownership flag during + * our pass through the skip list. 
To capture the number + * of entries added, and the skip list size delta, + * zero the slist_len_increase and slist_size_increase fields + * of the cache's instance of H5C_t. These fields will be + * updated elsewhere to account for slist insertions and/or + * dirty entry size changes. + */ + cache_ptr->slist_len_increase = 0; + cache_ptr->slist_size_increase = 0; + + /* At the end of the loop, use these values to compute the + * expected slist length and size and compare them with the + * values recorded in the cache's instance of H5C_t. + */ +#endif /* H5C_DO_SANITY_CHECKS */ + + restart_slist_scan = TRUE; + while ((restart_slist_scan) || (node_ptr != NULL)) { + if (restart_slist_scan) { + restart_slist_scan = FALSE; + + /* Start at beginning of skip list */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + if (node_ptr == NULL) + /* the slist is empty -- break out of inner loop */ + break; + + /* Get cache entry for this node */ + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + assert(next_entry_ptr->is_dirty); + assert(next_entry_ptr->in_slist); + } /* end if */ + + entry_ptr = next_entry_ptr; + + /* With the advent of the fractal heap, the free space + * manager, and the version 3 cache, it is possible + * that the pre-serialize or serialize callback will + * dirty, resize, or take ownership of other entries + * in the cache. + * + * To deal with this, there is code to detect any + * change in the skip list not directly under the control + * of this function. If such modifications are detected, + * we must restart the scan of the skip list to avoid + * the possibility that the target of the next_entry_ptr + * may have been flushed or deleted from the cache. + * + * To verify that all such possibilities have been dealt + * with, we do a bit of extra sanity checking on + * entry_ptr. + */ + assert(entry_ptr->in_slist); + assert(entry_ptr->is_dirty); + + if (!flush_marked_entries || entry_ptr->flush_marker) + assert(entry_ptr->ring >= ring); + + /* Advance node pointer now, before we delete its target + * from the slist. + */ + node_ptr = H5SL_next(node_ptr); + if (node_ptr != NULL) { + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + assert(next_entry_ptr->is_dirty); + assert(next_entry_ptr->in_slist); + + if (!flush_marked_entries || next_entry_ptr->flush_marker) + assert(next_entry_ptr->ring >= ring); + + assert(entry_ptr != next_entry_ptr); + } /* end if */ + else + next_entry_ptr = NULL; + + if ((!flush_marked_entries || entry_ptr->flush_marker) && + ((!entry_ptr->flush_me_last) || + ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) || + (flush_marked_entries && entry_ptr->flush_marker)))) && + ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) && + (entry_ptr->ring == ring)) { + + assert(entry_ptr->flush_dep_nunser_children == 0); + + if (entry_ptr->is_protected) { + /* We probably have major problems -- but let's + * flush everything we can before we decide + * whether to flag an error. 
+ */ + tried_to_flush_protected_entry = TRUE; + protected_entries++; + } /* end if */ + else { + if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); + } /* end if */ + + flushed_entries_last_pass = TRUE; + } /* end else */ + } /* end if */ + } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */ + +#ifdef H5C_DO_SANITY_CHECKS + /* Verify that the slist size and length are as expected. */ + assert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == + cache_ptr->slist_len); + assert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == + cache_ptr->slist_size); +#endif /* H5C_DO_SANITY_CHECKS */ + } /* while */ + + assert(protected_entries <= cache_ptr->pl_len); + + if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items") + +#ifdef H5C_DO_SANITY_CHECKS + if (!flush_marked_entries) { + assert(cache_ptr->slist_ring_len[ring] == 0); + assert(cache_ptr->slist_ring_size[ring] == 0); + } /* end if */ +#endif /* H5C_DO_SANITY_CHECKS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C__make_space_in_cache + * + * Purpose: Attempt to evict cache entries until the index_size + * is at least space_needed below max_cache_size. + * + * In passing, also attempt to bring cLRU_list_size to a + * value greater than min_clean_size. + * + * Depending on circumstances, both of these goals may + * be impossible, as in parallel mode, we must avoid generating + * a write as part of a read (to avoid deadlock in collective + * I/O), and in all cases, it is possible (though hopefully + * highly unlikely) that the protected list may exceed the + * maximum size of the cache. + * + * Thus the function simply does its best, returning success + * unless an error is encountered. + * + * Observe that this function cannot occasion a read. + * + * Return: Non-negative on success/Negative on failure. 
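+ * + * For illustration (hypothetical numbers): with max_cache_size = + * 4 MB, min_clean_size = 2 MB, index_size = 3.5 MB, and + * space_needed = 1 MB, the main loop below keeps flushing and + * evicting LRU entries until index_size + space_needed <= 4 MB and + * empty_space + clean_index_size >= 2 MB, or until it has examined + * twice the initial length of the LRU list.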
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; +#if H5C_COLLECT_CACHE_STATS + int32_t clean_entries_skipped = 0; + int32_t dirty_pf_entries_skipped = 0; + int32_t total_entries_scanned = 0; +#endif /* H5C_COLLECT_CACHE_STATS */ + uint32_t entries_examined = 0; + uint32_t initial_list_len; + size_t empty_space; + hbool_t reentrant_call = FALSE; + hbool_t prev_is_dirty = FALSE; + hbool_t didnt_flush_entry = FALSE; + hbool_t restart_scan; + H5C_cache_entry_t *entry_ptr; + H5C_cache_entry_t *prev_ptr; + H5C_cache_entry_t *next_ptr; +#ifndef NDEBUG + uint32_t num_corked_entries = 0; +#endif + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(cache_ptr); + assert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); + + /* Check to see if cache_ptr->msic_in_progress is TRUE. If it is, this + * is a re-entrant call via a client callback called in the make + * space in cache process. To avoid an infinite recursion, set + * reentrant_call to TRUE, and goto done. + */ + if (cache_ptr->msic_in_progress) { + reentrant_call = TRUE; + HGOTO_DONE(SUCCEED); + } /* end if */ + + cache_ptr->msic_in_progress = TRUE; + + if (write_permitted) { + restart_scan = FALSE; + initial_list_len = cache_ptr->LRU_list_len; + entry_ptr = cache_ptr->LRU_tail_ptr; + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) || + ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) && + (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) { + assert(!(entry_ptr->is_protected)); + assert(!(entry_ptr->is_read_only)); + assert((entry_ptr->ro_ref_count) == 0); + + next_ptr = entry_ptr->next; + prev_ptr = entry_ptr->prev; + + if (prev_ptr != NULL) + prev_is_dirty = prev_ptr->is_dirty; + + if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { + /* Skip "dirty" corked entries. */ +#ifndef NDEBUG + ++num_corked_entries; +#endif + didnt_flush_entry = TRUE; + } + else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && + !entry_ptr->prefetched_dirty) { + didnt_flush_entry = FALSE; + if (entry_ptr->is_dirty) { +#if H5C_COLLECT_CACHE_STATS + if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) + cache_ptr->entries_scanned_to_make_space++; +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* Reset entries_removed_counter and + * last_entry_removed_ptr prior to the call to + * H5C__flush_single_entry() so that we can spot + * unexpected removals of entries from the cache, + * and set the restart_scan flag if proceeding + * would be likely to cause us to scan an entry + * that is no longer in the cache. 
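+ * + * For example (hypothetical scenario): flushing entry_ptr may evict + * a proxy entry that happens to be prev_ptr. The test below catches + * this -- entries_removed_counter > 1, or last_entry_removed_ptr == + * prev_ptr -- and sets restart_scan, so the scan resumes from the + * LRU tail instead of following a dangling pointer.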
+ */ + cache_ptr->entries_removed_counter = 0; + cache_ptr->last_entry_removed_ptr = NULL; + + if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + + if ((cache_ptr->entries_removed_counter > 1) || + (cache_ptr->last_entry_removed_ptr == prev_ptr)) + + restart_scan = TRUE; + } + else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size +#ifdef H5_HAVE_PARALLEL + && !(entry_ptr->coll_access) +#endif /* H5_HAVE_PARALLEL */ + ) { +#if H5C_COLLECT_CACHE_STATS + cache_ptr->entries_scanned_to_make_space++; +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (H5C__flush_single_entry(f, entry_ptr, + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } + else { + /* We have enough space so don't flush clean entry. */ +#if H5C_COLLECT_CACHE_STATS + clean_entries_skipped++; +#endif /* H5C_COLLECT_CACHE_STATS */ + didnt_flush_entry = TRUE; + } + +#if H5C_COLLECT_CACHE_STATS + total_entries_scanned++; +#endif /* H5C_COLLECT_CACHE_STATS */ + } + else { + + /* Skip epoch markers, entries that are in the process + * of being flushed, and entries marked as prefetched_dirty + * (occurs in the R/O case only). + */ + didnt_flush_entry = TRUE; + +#if H5C_COLLECT_CACHE_STATS + if (entry_ptr->prefetched_dirty) + dirty_pf_entries_skipped++; +#endif /* H5C_COLLECT_CACHE_STATS */ + } + + if (prev_ptr != NULL) { + if (didnt_flush_entry) + /* epoch markers don't get flushed, and we don't touch + * entries that are in the process of being flushed. + * Hence no need for sanity checks, as we haven't + * flushed anything. Thus just set entry_ptr to prev_ptr + * and go on. + */ + entry_ptr = prev_ptr; + else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr || + prev_ptr->is_protected || prev_ptr->is_pinned) { + /* something has happened to the LRU -- start over + * from the tail. 
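+ * + * (Here "something has happened" is detected by comparing the + * snapshot taken before the flush -- prev_is_dirty and next_ptr -- + * against prev_ptr's current state: if its dirty status or successor + * changed, or it became protected or pinned, the LRU was reshuffled + * behind our back, so we resume from cache_ptr->LRU_tail_ptr rather + * than trusting prev_ptr.)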
+ */ + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr); + } + else + entry_ptr = prev_ptr; + } + else + entry_ptr = NULL; + + entries_examined++; + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + assert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); + } + +#if H5C_COLLECT_CACHE_STATS + cache_ptr->calls_to_msic++; + + cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped; + cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped; + cache_ptr->total_entries_scanned_in_msic += total_entries_scanned; + + if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) + cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped; + + if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic) + cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped; + + if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) + cache_ptr->max_entries_scanned_in_msic = total_entries_scanned; +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* NEED: work on a better assert for corked entries */ + assert((entries_examined > (2 * initial_list_len)) || + ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) > + cache_ptr->max_cache_size) || + ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) || + ((num_corked_entries))); +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + + assert((entries_examined > (2 * initial_list_len)) || + (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size)); + assert((entries_examined > (2 * initial_list_len)) || + (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size)); + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + } + else { + assert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + initial_list_len = cache_ptr->cLRU_list_len; + entry_ptr = cache_ptr->cLRU_tail_ptr; + + while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && + (entries_examined <= initial_list_len) && (entry_ptr != NULL)) { + assert(!(entry_ptr->is_protected)); + assert(!(entry_ptr->is_read_only)); + assert((entry_ptr->ro_ref_count) == 0); + assert(!(entry_ptr->is_dirty)); + + prev_ptr = entry_ptr->aux_prev; + + if (!entry_ptr->prefetched_dirty +#ifdef H5_HAVE_PARALLEL + && !entry_ptr->coll_access +#endif /* H5_HAVE_PARALLEL */ + ) { + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } /* end if */ + + /* we are scanning the clean LRU, so the serialize function + * will not be called on any entry -- thus there is no + * concern about the list being modified out from under + * this function. + */ + + entry_ptr = prev_ptr; + entries_examined++; + } +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + } + +done: + /* Sanity checks */ + assert(cache_ptr->msic_in_progress); + if (!reentrant_call) + cache_ptr->msic_in_progress = FALSE; + assert((!reentrant_call) || (cache_ptr->msic_in_progress)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__make_space_in_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__serialize_cache + * + * Purpose: Serialize (i.e. 
construct an on-disk image) for all entries + * in the metadata cache including clean entries. + * + * Note that flush dependencies and "flush me last" flags + * must be observed in the serialization process. + * + * Note also that entries may be loaded, flushed, evicted, + * expunged, relocated, resized, or removed from the cache + * during this process, just as these actions may occur during + * a regular flush. + * + * However, we are given that the cache will contain no protected + * entries on entry to this routine (although entries may be + * briefly protected and then unprotected during the serialize + * process). + * + * The objective of this routine is to serialize all entries and + * to force them into their actual locations on disk. + * + * The initial need for this routine is to settle all entries + * in the cache prior to construction of the metadata cache + * image so that the size of the cache image can be calculated. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__serialize_cache(H5F_t *f) +{ +#ifdef H5C_DO_SANITY_CHECKS + int i; + uint32_t index_len = 0; + size_t index_size = (size_t)0; + size_t clean_index_size = (size_t)0; + size_t dirty_index_size = (size_t)0; + size_t slist_size = (size_t)0; + uint32_t slist_len = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + H5C_ring_t ring; + H5C_t *cache_ptr; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(cache_ptr->slist_ptr); + +#ifdef H5C_DO_SANITY_CHECKS + assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); + assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); + assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + + for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { + index_len += cache_ptr->index_ring_len[i]; + index_size += cache_ptr->index_ring_size[i]; + clean_index_size += cache_ptr->clean_index_ring_size[i]; + dirty_index_size += cache_ptr->dirty_index_ring_size[i]; + + slist_len += cache_ptr->slist_ring_len[i]; + slist_size += cache_ptr->slist_ring_size[i]; + } /* end for */ + + assert(cache_ptr->index_len == index_len); + assert(cache_ptr->index_size == index_size); + assert(cache_ptr->clean_index_size == clean_index_size); + assert(cache_ptr->dirty_index_size == dirty_index_size); + assert(cache_ptr->slist_len == slist_len); + assert(cache_ptr->slist_size == slist_size); +#endif /* H5C_DO_SANITY_CHECKS */ + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +#ifndef NDEBUG + /* If this is a debug build, set the serialization_count field of + * each entry in the cache to zero before we start the serialization. 
+ * This allows us to detect the case in which any entry is serialized + * more than once (a performance issue), and more importantly, the + * case in which any flush dependency parent is serialized more than + * once (a correctness issue). + */ + { + H5C_cache_entry_t *scan_ptr = NULL; + + scan_ptr = cache_ptr->il_head; + while (scan_ptr != NULL) { + scan_ptr->serialization_count = 0; + scan_ptr = scan_ptr->il_next; + } /* end while */ + } /* end block */ +#endif + + /* Set cache_ptr->serialization_in_progress to TRUE, and back + * to FALSE at the end of the function. Must maintain this flag + * to support H5C_get_serialization_in_progress(), which is in + * turn required to support sanity checking in some cache + * clients. + */ + assert(!cache_ptr->serialization_in_progress); + cache_ptr->serialization_in_progress = TRUE; + + /* Serialize each ring, starting from the outermost ring and + * working inward. + */ + ring = H5C_RING_USER; + while (ring < H5C_RING_NTYPES) { + assert(cache_ptr->close_warning_received); + switch (ring) { + case H5C_RING_USER: + break; + + case H5C_RING_RDFSM: + /* Settle raw data FSM */ + if (!cache_ptr->rdfsm_settled) + if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed") + break; + + case H5C_RING_MDFSM: + /* Settle metadata FSM */ + if (!cache_ptr->mdfsm_settled) + if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed") + break; + + case H5C_RING_SBE: + case H5C_RING_SB: + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!") + break; + } /* end switch */ + + if (H5C__serialize_ring(f, ring) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed") + + ring++; + } /* end while */ + +#ifndef NDEBUG + /* Verify that no entry has been serialized more than once. + * FD parents with multiple serializations should have been caught + * elsewhere, so no specific check for them here. + */ + { + H5C_cache_entry_t *scan_ptr = NULL; + + scan_ptr = cache_ptr->il_head; + while (scan_ptr != NULL) { + assert(scan_ptr->serialization_count <= 1); + + scan_ptr = scan_ptr->il_next; + } /* end while */ + } /* end block */ +#endif + +done: + cache_ptr->serialization_in_progress = FALSE; + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__serialize_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__serialize_ring + * + * Purpose: Serialize the entries contained in the specified cache and + * ring. All entries in rings outside the specified ring + * must have been serialized on entry. + * + * If the cache contains protected entries in the specified + * ring, the function will fail, as protected entries cannot + * be serialized. However all unprotected entries in the + * target ring should be serialized before the function + * returns failure. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the index list + * serializing entries in flush dependency order. + * + * All entries outside the H5C_RING_SBE are marked for + * inclusion in the cache image. Entries in H5C_RING_SBE + * and below are marked for exclusion from the image. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
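+ * + * As a rough sketch (illustrative only, not the actual code), the + * scan below amounts to: + * + * do { + * did_work = FALSE; + * for each entry on cache_ptr->il_head: + * if (!entry->image_up_to_date && + * entry->flush_dep_nunser_children == 0) { + * serialize the entry; did_work = TRUE; + * } + * } while (did_work); + * + * i.e. repeated passes over the index list, serializing leaves of + * the flush dependency tree first, until a pass finds nothing left + * to serialize.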
+ * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) +{ + hbool_t done = FALSE; + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(f); + assert(f->shared); + cache_ptr = f->shared->cache; + assert(cache_ptr); + assert(ring > H5C_RING_UNDEFINED); + assert(ring < H5C_RING_NTYPES); + + assert(cache_ptr->serialization_in_progress); + + /* The objective here is to serialize all entries in the cache ring + * in flush dependency order. + * + * The basic algorithm is to scan the cache index list looking for + * unserialized entries that are either not in a flush dependency + * relationship, or which have no unserialized children. Any such + * entry is serialized and its flush dependency parents (if any) are + * informed -- allowing them to decrement their unserialized child counts. + * + * However, this algorithm is complicated by the ability + * of client serialization callbacks to perform operations + * on the cache which can result in the insertion, deletion, + * relocation, resize, dirty, flush, eviction, or removal (via the + * take ownership flag) of entries. Changes in the flush dependency + * structure are also possible. + * + * On the other hand, the algorithm is simplified by the fact that + * we are serializing, not flushing. Thus, as long as all entries + * are serialized correctly, it doesn't matter if we have to go back + * and serialize an entry a second time. + * + * These possible actions result in the following modifications to + * the basic algorithm: + * + * 1) In the event of an entry expunge, eviction or removal, we must + * restart the scan as it is possible that the next entry in our + * scan is no longer in the cache. Were we to examine this entry, + * we would be accessing deallocated memory. + * + * 2) A resize, dirty, or insertion of an entry may result in + * the increment of a flush dependency parent's dirty and/or + * unserialized child count. In the context of serializing + * the cache, this is a non-issue, as even if we have already + * serialized the parent, it will be marked dirty and its image + * marked out of date if appropriate when the child is serialized. + * + * However, this is a major issue for a flush, as were this to happen + * in a flush, it would violate the invariant that the flush dependency + * feature is intended to enforce. As the metadata cache has no + * control over the behavior of cache clients, it has no way of + * preventing this behavior. However, it should detect it if at all + * possible. + * + * Do this by maintaining a count of the number of times each entry is + * serialized during a cache serialization. If any flush dependency + * parent is serialized more than once, throw an assertion failure. + * + * 3) An entry relocation will typically change the location of the + * entry in the index list. This shouldn't cause problems as we + * will scan the index list until we make a complete pass without + * finding anything to serialize -- making relocations of either + * the current or next entries irrelevant. + * + * Note that since a relocation may result in our skipping part of + * the index list, we must always do at least one more pass through + * the index list after an entry relocation. + * + * 4) Changes in the flush dependency structure are possible on + * entry insertion, load, expunge, evict, or remove. 
Destruction + * of a flush dependency has no effect, as it can only relax the + * flush dependencies. Creation of a flush dependency can create + * an unserialized child of a flush dependency parent where all + * flush dependency children were previously serialized. Should + * this child dirty the flush dependency parent when it is serialized, + * the parent will be re-serialized. + * + * Per the discussion of 2) above, this is a non-issue for cache + * serialization, and a major problem for cache flush. Using the + * same detection mechanism, throw an assertion failure if this + * condition appears. + * + * Observe that either eviction or removal of entries as a result of + * a serialization is not a problem as long as the flush dependency + * tree does not change beyond the removal of a leaf. + */ + while (!done) { + /* Reset the counters so that we can detect insertions, loads, + * moves, and flush dependency height changes caused by the pre_serialize + * and serialize callbacks. + */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + done = TRUE; /* set to FALSE if any activity in inner loop */ + entry_ptr = cache_ptr->il_head; + while (entry_ptr != NULL) { + /* Verify that either the entry is already serialized, or + * that it is assigned to either the target or an inner + * ring. + */ + assert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); + + /* Skip flush me last entries or inner ring entries */ + if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) { + + /* If we encounter an unserialized entry in the current + * ring that is not marked flush me last, we are not done. + */ + if (!entry_ptr->image_up_to_date) + done = FALSE; + + /* Serialize the entry if its image is not up to date + * and it has no unserialized flush dependency children. + */ + if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) { + assert(entry_ptr->serialization_count == 0); + + /* Serialize the entry */ + if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") + + assert(entry_ptr->flush_dep_nunser_children == 0); + assert(entry_ptr->serialization_count == 0); + +#ifndef NDEBUG + /* Increment serialization counter (to detect multiple serializations) */ + entry_ptr->serialization_count++; +#endif + } /* end if */ + } /* end if */ + + /* Check for the cache being perturbed during the entry serialize */ + if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) || + (cache_ptr->entries_relocated_counter > 0)) { + +#if H5C_COLLECT_CACHE_STATS + H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr); +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* Reset the counters */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + /* Restart scan */ + entry_ptr = cache_ptr->il_head; + } /* end if */ + else + /* Advance to next entry */ + entry_ptr = entry_ptr->il_next; + } /* while ( entry_ptr != NULL ) */ + } /* while ( ! done ) */ + + /* Reset the counters so that we can detect insertions, loads, + * moves, and flush dependency height changes caused by the pre_serialize + * and