summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorQuincey Koziol <quincey@koziol.cc>2023-04-11 14:41:32 (GMT)
committerGitHub <noreply@github.com>2023-04-11 14:41:32 (GMT)
commit49a71463a064a80aec7f67d6de008d892954febf (patch)
tree4e0d432ab31655d169ce88a14dff70f2d0cefc2a /src
parent1ad030f6696159c498880a4d785f9a2f7ae6fd32 (diff)
downloadhdf5-49a71463a064a80aec7f67d6de008d892954febf.zip
hdf5-49a71463a064a80aec7f67d6de008d892954febf.tar.gz
hdf5-49a71463a064a80aec7f67d6de008d892954febf.tar.bz2
Comment cleanup (#2689)
* Clean up content and redundant logging in comments.
Diffstat (limited to 'src')
-rw-r--r--src/H5AC.c42
-rw-r--r--src/H5ACmpio.c2
-rw-r--r--src/H5C.c477
-rw-r--r--src/H5Cdbg.c6
-rw-r--r--src/H5Cepoch.c3
-rw-r--r--src/H5Cimage.c22
-rw-r--r--src/H5Cmpio.c56
-rw-r--r--src/H5Cpkg.h582
-rw-r--r--src/H5Cprivate.h51
-rw-r--r--src/H5FD.c8
-rw-r--r--src/H5FDint.c16
-rw-r--r--src/H5FDmpi.c30
-rw-r--r--src/H5FDsubfiling/H5FDioc_int.c2
-rw-r--r--src/H5FDsubfiling/H5FDioc_threads.c27
-rw-r--r--src/H5FDsubfiling/H5FDsubfile_int.c4
-rw-r--r--src/H5FDsubfiling/H5FDsubfiling.c12
-rw-r--r--src/H5FDsubfiling/H5subfiling_common.c13
-rw-r--r--src/H5detect.c2
-rw-r--r--src/H5make_libsettings.c2
19 files changed, 146 insertions, 1211 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index 49ff0d3..b93fca9 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -413,14 +413,6 @@ done:
* Programmer: Robb Matzke
* Jul 9 1997
*
- * Changes:
- *
- * In the parallel case, added code to setup the MDC slist
- * before the call to H5AC__flush_entries() and take it down
- * afterwards.
- *
- * JRM -- 7/29/20
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1191,13 +1183,12 @@ done:
*
* Function: H5AC_prep_for_file_flush
*
- * Purpose: This function should be called just prior to the first
- * call to H5AC_flush() during a file flush.
+ * Purpose: Handle any setup required prior to metadata cache flush.
*
- * Its purpose is to handly any setup required prior to
- * metadata cache flush.
+ * This function should be called just prior to the first
+ * call to H5AC_flush() during a file flush.
*
- * Initially, this means setting up the slist prior to the
+ * Initially, this means setting up the skip list prior to the
* flush. We do this in a separate call because
* H5F__flush_phase2() make repeated calls to H5AC_flush().
* Handling this detail in separate calls allows us to avoid
@@ -1209,8 +1200,6 @@ done:
* Programmer: John Mainzer
* 5/5/20
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1242,9 +1231,6 @@ done:
* Purpose: This function should be called just after the last
* call to H5AC_flush() during a file flush.
*
- * Its purpose is to perform any necessary cleanup after the
- * metadata cache flush.
- *
* The objective of the call is to allow the metadata cache
 * to do any necessary cleanup work after a cache
* flush.
@@ -1261,8 +1247,6 @@ done:
* Programmer: John Mainzer
* 5/5/20
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1474,24 +1458,6 @@ H5AC_resize_entry(void *thing, size_t new_size)
* amounts of dirty metadata creation in other areas -- which will
* cause aux_ptr->dirty_bytes to be incremented.
*
- * The bottom line is that this code is probably OK, but the above
- * points should be kept in mind.
- *
- * One final observation: This comment is occasioned by a bug caused
- * by moving the call to H5AC__log_dirtied_entry() after the call to
- * H5C_resize_entry(), and then only calling H5AC__log_dirtied_entry()
- * if entry_ptr->is_dirty was false.
- *
- * Since H5C_resize_entry() marks the target entry dirty unless there
- * is not change in size, this had the effect of not calling
- * H5AC__log_dirtied_entry() when it should be, and corrupting
- * the cleaned and dirtied lists used by rank 0 in the parallel
- * version of the metadata cache.
- *
- * The point here is that you should be very careful when working with
- * this code, and not modify it unless you fully understand it.
- *
- * JRM -- 2/28/22
*/
if ((!entry_ptr->is_dirty) && (entry_ptr->size != new_size)) {
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 197cc3c..40e68fd 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -1860,8 +1860,6 @@ done:
* Programmer: John Mainzer
* April 28, 2010
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
diff --git a/src/H5C.c b/src/H5C.c
index ae03d69..c41b143 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -185,82 +185,6 @@ H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/20/04
- * Updated for the addition of the hash table.
- *
- * JRM -- 10/5/04
- * Added call to H5C_reset_cache_hit_rate_stats(). Also
- * added initialization for cache_is_full flag and for
- * resize_ctl.
- *
- * JRM -- 11/12/04
- * Added initialization for the new size_decreased field.
- *
- * JRM -- 11/17/04
- * Added/updated initialization for the automatic cache
- * size control data structures.
- *
- * JRM -- 6/24/05
- * Added support for the new write_permitted field of
- * the H5C_t structure.
- *
- * JRM -- 7/5/05
- * Added the new log_flush parameter and supporting code.
- *
- * JRM -- 9/21/05
- * Added the new aux_ptr parameter and supporting code.
- *
- * JRM -- 1/20/06
- * Added initialization of the new prefix field in H5C_t.
- *
- * JRM -- 3/16/06
- * Added initialization for the pinned entry related fields.
- *
- * JRM -- 5/31/06
- * Added initialization for the trace_file_ptr field.
- *
- * JRM -- 8/19/06
- * Added initialization for the flush_in_progress field.
- *
- * JRM -- 8/25/06
- * Added initialization for the slist_len_increase and
- * slist_size_increase fields. These fields are used
- * for sanity checking in the flush process, and are not
- * compiled in unless H5C_DO_SANITY_CHECKS is TRUE.
- *
- * JRM -- 3/28/07
- * Added initialization for the new is_read_only and
- * ro_ref_count fields.
- *
- * JRM -- 7/27/07
- * Added initialization for the new evictions_enabled
- * field of H5C_t.
- *
- * JRM -- 12/31/07
- * Added initialization for the new flash cache size increase
- * related fields of H5C_t.
- *
- * JRM -- 11/5/08
- * Added initialization for the new clean_index_size and
- * dirty_index_size fields of H5C_t.
- *
- *
- * Missing entries?
- *
- *
- * JRM -- 4/20/20
- * Added initialization for the slist_enabled field. Recall
- * that the slist is used to flush metadata cache entries
- * in (roughly) increasing address order. While this is
- * needed at flush and close, it is not used elsewhere.
- * The slist_enabled field exists to allow us to construct
- * the slist when needed, and leave it empty otherwise -- thus
- * avoiding the overhead of maintaining it.
- *
- * JRM -- 4/29/20
- *
*-------------------------------------------------------------------------
*/
H5C_t *
@@ -691,10 +615,7 @@ H5C_prep_for_file_close(H5F_t *f)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* For now at least, it is possible to receive the
- * close warning more than once -- the following
- * if statement handles this.
- */
+ /* It is possible to receive the close warning more than once */
if (cache_ptr->close_warning_received)
HGOTO_DONE(SUCCEED)
cache_ptr->close_warning_received = TRUE;
@@ -759,27 +680,13 @@ done:
* This function fails if any object are protected since the
* resulting file might not be consistent.
*
- * Note that *cache_ptr has been freed upon successful return.
+ * Note: *cache_ptr has been freed upon successful return.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 5/15/20
- *
- * Updated the function to enable the slist prior to the
- * call to H5C__flush_invalidate_cache().
- *
- * Arguably, it shouldn't be necessary to re-enable the
- * slist after the call to H5C__flush_invalidate_cache(), as
- * the metadata cache should be discarded. However, in the
- * test code, we make multiple calls to H5C_dest(). Thus
- * we re-enable the slist on failure if it and the cache
- * still exist.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -859,9 +766,12 @@ H5C_dest(H5F_t *f)
done:
if ((ret_value < 0) && (cache_ptr) && (cache_ptr->slist_ptr)) {
-
- /* need this for test code -- see change note for details */
-
+ /* Arguably, it shouldn't be necessary to re-enable the slist after
+ * the call to H5C__flush_invalidate_cache(), as the metadata cache
+ * should be discarded. However, in the test code, we make multiple
+ * calls to H5C_dest(). Thus we re-enable the slist on failure if it
+ * and the cache still exist. JRM -- 5/15/20
+ */
if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed")
@@ -881,14 +791,6 @@ done:
* Programmer: Vailin Choi
* Dec 2013
*
- * Modifications:
- *
- * JRM -- 5/5/20
- *
- * Added code to enable the skip list prior to the call
- * to H5C__flush_invalidate_cache(), and disable it
- * afterwards.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -923,9 +825,9 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_expunge_entry
*
- * Purpose: Use this function to tell the cache to expunge an entry
- * from the cache without writing it to disk even if it is
- * dirty. The entry may not be either pinned or protected.
+ * Purpose: Expunge an entry from the cache without writing it to disk
+ * even if it is dirty. The entry may not be either pinned or
+ * protected.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1007,39 +909,11 @@ done:
* function returns failure.
*
* Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
+ * a request to flush all items and an entry was protected.
*
* Programmer: John Mainzer
* 6/2/04
*
- * Changes: Modified function to test for slist chamges in
- * pre_serialize and serialize callbacks, and re-start
- * scans through the slist when such changes occur.
- *
- * This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However the shift
- * to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
- * free space manager and the fractal heap) have
- * made this re-work necessary.
- *
- * JRM -- 12/13/14
- *
- * Modified function to support rings. Basic idea is that
- * every entry in the cache is assigned to a ring. Entries
- * in the outermost ring are flushed first, followed by
- * those in the next outermost ring, and so on until the
- * innermost ring is flushed. See header comment on
- * H5C_ring_t in H5Cprivate.h for a more detailed
- * discussion.
- *
- * JRM -- 8/30/15
- *
- * Modified function to call the free space manager
- * settling functions.
- * JRM -- 6/9/16
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1226,8 +1100,6 @@ done:
* exist on disk yet, but it must have an address and disk
* space reserved.
*
- * Observe that this function cannot occasion a read.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -1443,9 +1315,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
* oversized at the end of an unprotect. As a result, it is
* possible to have a vastly oversized cache with no protected
* entries as long as all the protects precede the unprotects.
- *
- * Since items 1 and 2 are not changing any time soon, I see
- * no point in worrying about the third.
*/
if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
@@ -1531,12 +1400,6 @@ done:
* Programmer: John Mainzer
* 5/15/06
*
- * JRM -- 11/5/08
- * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
- * update the new clean_index_size and dirty_index_size
- * fields of H5C_t in the case that the entry was clean
- * prior to this call, and is pinned and not protected.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2092,9 +1955,6 @@ done:
* Programmer: John Mainzer
* 4/26/06
*
- * Changes: Added extreme sanity checks on entry and exit.
- * JRM -- 4/26/14
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2242,14 +2102,14 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
if (entry_ptr->type != type)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
- /* if this is a collective metadata read, the entry is not
- marked as collective, and is clean, it is possible that
- other processes will not have it in its cache and will
- expect a bcast of the entry from process 0. So process 0
- will bcast the entry to all other ranks. Ranks that _do_ have
- the entry in their cache still have to participate in the
- bcast. */
#ifdef H5_HAVE_PARALLEL
+ /* If this is a collective metadata read, the entry is not marked as
+ * collective, and is clean, it is possible that other processes will
+ * not have it in its cache and will expect a bcast of the entry from
+ * process 0. So process 0 will bcast the entry to all other ranks.
+ * Ranks that _do_ have the entry in their cache still have to
+ * participate in the bcast.
+ */
if (coll_access) {
if (!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
MPI_Comm comm; /* File MPI Communicator */
@@ -2415,24 +2275,16 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
* oversized at the end of an unprotect. As a result, it is
* possible to have a vastly oversized cache with no protected
* entries as long as all the protects precede the unprotects.
- *
- * Since items 1, 2, and 3 are not changing any time soon, I
- * see no point in worrying about the fourth.
*/
-
if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
} /* end if */
- /* Insert the entry in the hash table. It can't be dirty yet, so
- * we don't even check to see if it should go in the skip list.
- *
- * This is no longer true -- due to a bug fix, we may modify
- * data on load to repair a file.
+ /* Insert the entry in the hash table.
*
* *******************************************
*
- * Set the flush_last field
+ * Set the flush_me_last field
* of the newly loaded entry before inserting it into the
* index. Must do this, as the index tracked the number of
* entries with the flush_last field set, but assumes that
@@ -2531,7 +2383,6 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
* should also call H5C__make_space_in_cache() to bring us
* into compliance.
*/
-
if (cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
else
@@ -2689,7 +2540,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?")
} /* end switch */
- /* logically, this is were configuration for flash cache size increases
+ /* logically, this is where configuration for flash cache size increases
* should go. However, this configuration depends on max_cache_size, so
* we wait until the end of the function, when this field is set.
*/
@@ -2842,8 +2693,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
/* There is no fundamental reason why we should not permit
* evictions to be disabled while automatic resize is enabled.
- * However, I can't think of any good reason why one would
- * want to, and allowing it would greatly complicate testing
+ * However, allowing it would greatly complicate testing
* the feature. Hence the following:
*/
if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
@@ -2912,10 +2762,6 @@ done:
* Programmer: John Mainzer
* 5/1/20
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3035,9 +2881,6 @@ done:
* Programmer: John Mainzer
* 3/22/06
*
- * Changes: Added extreme sanity checks on entry and exit.
- * JRM -- 4/26/14
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3098,81 +2941,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated for the addition of the hash table.
- *
- * JRM -- 10/28/04
- * Added code to set cache_full to TRUE whenever we try to
- * make space in the cache.
- *
- * JRM -- 11/12/04
- * Added code to call to H5C_make_space_in_cache() after the
- * call to H5C__auto_adjust_cache_size() if that function
- * sets the size_decreased flag is TRUE.
- *
- * JRM -- 4/25/05
- * The size_decreased flag can also be set to TRUE in
- * H5C_set_cache_auto_resize_config() if a new configuration
- * forces an immediate reduction in cache size. Modified
- * the code to deal with this eventuallity.
- *
- * JRM -- 6/24/05
- * Added support for the new write_permitted field of H5C_t.
- *
- * JRM -- 10/22/05
- * Hand optimizations.
- *
- * JRM -- 5/3/06
- * Added code to set the new dirtied field in
- * H5C_cache_entry_t to FALSE prior to return.
- *
- * JRM -- 6/23/06
- * Modified code to allow dirty entries to be loaded from
- * disk. This is necessary as a bug fix in the object
- * header code requires us to modify a header as it is read.
- *
- * JRM -- 3/28/07
- * Added the flags parameter and supporting code. At least
- * for now, this parameter is used to allow the entry to
- * be protected read only, thus allowing multiple protects.
- *
- * Also added code to allow multiple read only protects
- * of cache entries.
- *
- * JRM -- 7/27/07
- * Added code supporting the new evictions_enabled field
- * in H5C_t.
- *
- * JRM -- 1/3/08
- * Added to do a flash cache size increase if appropriate
- * when a large entry is loaded.
- *
- * JRM -- 11/13/08
- * Modified function to call H5C_make_space_in_cache() when
- * the min_clean_size is violated, not just when there isn't
- * enough space for and entry that has just been loaded.
- *
- * The purpose of this modification is to avoid "metadata
- * blizzards" in the write only case. In such instances,
- * the cache was allowed to fill with dirty metadata. When
- * we finally needed to evict an entry to make space, we had
- * to flush out a whole cache full of metadata -- which has
- * interesting performance effects. We hope to avoid (or
- * perhaps more accurately hide) this effect by maintaining
- * the min_clean_size, which should force us to start flushing
- * entries long before we actually have to evict something
- * to make space.
- *
- *
- * Missing entries?
- *
- *
- * JRM -- 5/8/20
- * Updated for the possibility that the slist will be
- * disabled.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3427,14 +3195,10 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
}
} /* end if */
- /* this implementation of the "deleted" option is a bit inefficient, as
+ /* This implementation of the "deleted" option is a bit inefficient, as
* we re-insert the entry to be deleted into the replacement policy
* data structures, only to remove them again. Depending on how often
* we do this, we may want to optimize a bit.
- *
- * On the other hand, this implementation is reasonably clean, and
- * makes good use of existing code.
- * JRM - 5/19/04
*/
if (deleted) {
@@ -3476,8 +3240,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
} /* end if */
#ifdef H5_HAVE_PARALLEL
else if (clear_entry) {
-
- /* verify that the target entry is in the cache. */
+ /* Verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
if (test_entry_ptr == NULL)
@@ -4704,8 +4467,6 @@ done:
* will be re-calculated, and will be enforced the next time
* we have to make space in the cache.
*
- * Observe that this function cannot occasion a read.
- *
* Return: Non-negative on success/Negative on failure.
*
* Programmer: John Mainzer, 11/22/04
@@ -5138,8 +4899,6 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n
if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
(cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
- /* we have work to do */
-
switch ((cache_ptr->resize_ctl).flash_incr_mode) {
case H5C_flash_incr__off:
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
@@ -5259,52 +5018,7 @@ done:
* a request to flush all items and something was protected.
*
* Programmer: John Mainzer
- * 3/24/065
- *
- * Modifications:
- *
- * To support the fractal heap, the cache must now deal with
- * entries being dirtied, resized, and/or renamed inside
- * flush callbacks. Updated function to support this.
- *
- * -- JRM 8/27/06
- *
- * Added code to detect and manage the case in which a
- * flush callback changes the s-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that we should just scream and die if
- * we ever detect the condition.
- *
- * -- JRM 10/13/07
- *
- * Missing entries?
- *
- *
- * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
- * This flag is used to flush and evict all entries in
- * the metadata cache that are not pinned -- typically,
- * everything other than the superblock.
- *
- * ??? -- ??/??/??
- *
- * Added sanity checks to verify that the skip list is
- * enabled on entry. On the face of it, it would make
- * sense to enable the slist on entry, and disable it
- * on exit, as this function is not called repeatedly.
- * However, since this function can be called from
- * H5C_flush_cache(), this would create cases in the test
- * code where we would have to check the flags to determine
- * whether we must setup and take down the slist.
- *
- * JRM -- 5/5/20
+ * 3/24/05
*
*-------------------------------------------------------------------------
*/
@@ -5472,20 +5186,6 @@ done:
* Programmer: John Mainzer
* 9/1/15
*
- * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
- * This flag is used to flush and evict all entries in
- * the metadata cache that are not pinned -- typically,
- * everything other than the superblock.
- *
- * ??? -- ??/??/??
- *
- * A recent optimization turns off the slist unless a flush
- * is in progress. This should not effect this function, as
- * it is only called during a flush. Added an assertion to
- * verify this.
- *
- * JRM -- 5/6/20
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -5546,13 +5246,11 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
* for some other cache entry), we can no longer promise to flush
* the cache entries in increasing address order.
*
- * Instead, we just do the best we can -- making a pass through
+ * Instead, we make a pass through
* the skip list, and then a pass through the "clean" entries, and
* then repeating as needed. Thus it is quite possible that an
* entry will be evicted from the cache only to be re-loaded later
- * in the flush process (From what Quincey tells me, the pin
- * mechanism makes this impossible, but even it it is true now,
- * we shouldn't count on it in the future.)
+ * in the flush process.
*
* The bottom line is that entries will probably be flushed in close
* to increasing address order, but there are no guarantees.
@@ -5706,8 +5404,7 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
(entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
if (entry_ptr->is_protected) {
-
- /* we have major problems -- but lets flush
+ /* We have major problems -- but lets flush
* everything we can before we flag an error.
*/
protected_entries++;
@@ -5792,7 +5489,7 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
* Writes to disk are possible here.
*/
- /* reset the counters so that we can detect insertions, loads,
+ /* Reset the counters so that we can detect insertions, loads,
* and moves caused by the pre_serialize and serialize calls.
*/
cache_ptr->entries_loaded_counter = 0;
@@ -6000,14 +5697,6 @@ done:
* Programmer: John Mainzer
* 9/1/15
*
- * Changes: A recent optimization turns off the slist unless a flush
- * is in progress. This should not effect this function, as
- * it is only called during a flush. Added an assertion to
- * verify this.
- *
- * JRM -- 5/6/20
- *
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -6158,7 +5847,7 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
* dirty, resize, or take ownership of other entries
* in the cache.
*
- * To deal with this, I have inserted code to detect any
+ * To deal with this, there is code to detect any
* change in the skip list not directly under the control
* of this function. If such modifications are detected,
* we must re-start the scan of the skip list to avoid
@@ -6310,69 +5999,6 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * QAK -- 11/26/04
- * Updated function for the switch from TBBTs to skip lists.
- *
- * JRM -- 1/6/05
- * Updated function to reset the flush_marker field.
- * Also replace references to H5F_FLUSH_INVALIDATE and
- * H5F_FLUSH_CLEAR_ONLY with references to
- * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
- * respectively.
- *
- * JRM -- 6/24/05
- * Added code to remove dirty entries from the slist after
- * they have been flushed. Also added a sanity check that
- * will scream if we attempt a write when writes are
- * completely disabled.
- *
- * JRM -- 7/5/05
- * Added code to call the new log_flush callback whenever
- * a dirty entry is written to disk. Note that the callback
- * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
- * as there is no write to file in this case.
- *
- * JRM -- 8/21/06
- * Added code maintaining the flush_in_progress and
- * destroy_in_progress fields in H5C_cache_entry_t.
- *
- * Also added flush_flags parameter to the call to
- * type_ptr->flush() so that the flush routine can report
- * whether the entry has been resized or renamed. Added
- * code using the flush_flags variable to detect the case
- * in which the target entry is resized during flush, and
- * update the caches data structures accordingly.
- *
- * JRM -- 3/29/07
- * Added sanity checks on the new is_read_only and
- * ro_ref_count fields.
- *
- * QAK -- 2/07/08
- * Separated "destroy entry" concept from "remove entry from
- * cache" concept, by adding the 'take_ownership' flag and
- * the "destroy_entry" variable.
- *
- * JRM -- 11/5/08
- * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
- * maintain the new clean_index_size and clean_index_size
- * fields of H5C_t.
- *
- *
- * Missing entries??
- *
- *
- * JRM -- 5/8/20
- * Updated sanity checks for the possibility that the slist
- * is disabled.
- *
- * Also updated main comment to conform more closely with
- * the current state of the code.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6741,8 +6367,6 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* A clear and a flush are the same from the point of
* view of the replacement policy and the slist.
* Hence no differentiation between them.
- *
- * JRM -- 7/7/07
*/
H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
@@ -6942,9 +6566,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HDassert(take_ownership);
- /* client is taking ownership of the entry.
- * set bad magic here too so the cache will choke
- * unless the entry is re-inserted properly
+ /* Client is taking ownership of the entry. Set bad magic here too
+ * so the cache will choke unless the entry is re-inserted properly
*/
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
@@ -8002,15 +7625,6 @@ H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
* Programmer: Mike McGreevy
* November 3, 2010
*
- * Changes: Modified function to setup the slist before calling
- * H%C_flush_cache(), and take it down afterwards. Note
- * that the slist need not be empty after the call to
- * H5C_flush_cache() since we are only flushing marked
- * entries. Thus must set the clear_slist parameter
- * of H5C_set_slist_enabled to TRUE.
- *
- * JRM -- 5/6/20
- *
*-------------------------------------------------------------------------
*/
@@ -8412,8 +8026,6 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_en
* The initial need for this routine is to settle all entries
* in the cache prior to construction of the metadata cache
* image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
*
* Return: Non-negative on success/Negative on failure or if there was
* a request to flush all items and something was protected.
@@ -8582,16 +8194,16 @@ done:
* If the cache contains protected entries in the specified
* ring, the function will fail, as protected entries cannot
* be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
+ * target ring should be serialized before the function
+ * returns failure.
*
* If flush dependencies appear in the target ring, the
* function makes repeated passes through the index list
- * serializing entries in flush dependency order.
+ * serializing entries in flush dependency order.
*
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
*
* Return: Non-negative on success/Negative on failure or if there was
* a request to flush all items and something was protected.
@@ -8910,10 +8522,6 @@ done:
* Programmer: Mohamad Chaarawi
* 2/10/16
*
- * Changes: Updated sanity checks for the possibility that the skip
- * list is disabled.
- * JRM 5/16/20
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -8975,13 +8583,6 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
* in the parallel case, it will not detect an
* entry that dirties, resizes, and/or moves
* other entries during its flush.
- *
- * From what Quincey tells me, this test is
- * sufficient for now, as any flush routine that
- * does the latter will also do the former.
- *
- * If that ceases to be the case, further
- * tests will be necessary.
*/
if (cache_ptr->aux_ptr != NULL)
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 4d74a0a..0dc9756 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -259,12 +259,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
* Programmer: John Mainzer
* 11/15/14
*
- * Changes: Updated function for the slist_enabled field in H5C_t.
- * Recall that to minimize slist overhead, the slist is
- * empty and not maintained if cache_ptr->slist_enabled is
- * false.
- * JRM -- 5/6/20
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c
index f6de3ff..1b55080 100644
--- a/src/H5Cepoch.c
+++ b/src/H5Cepoch.c
@@ -45,7 +45,7 @@
*
* As a strategy for automatic cache size reduction, the cache may insert
* marker entries in the LRU list at the end of each epoch. These markers
- * are then used to identify entries that have not been accessed for n
+ * are then used to identify entries that have not been accessed for 'n'
* epochs so that they can be evicted from the cache.
*
****************************************************************************/
@@ -98,7 +98,6 @@ const H5AC_class_t H5AC_EPOCH_MARKER[1] = {
*
* None of these functions should ever be called, so there is no point in
* documenting them separately.
- * JRM - 11/16/04
*
***************************************************************************/
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 6fbd936..70944be 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -452,10 +452,6 @@ done:
*
* Programmer: John Mainzer, 8/10/15
*
- * Changes: Updated sanity checks for possibility that the slist
- * is disabled.
- * JRM -- 5/17/20
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1579,7 +1575,6 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl
/* The collective metadata write code is not currently compatible
* with cache image. Until this is fixed, suppress cache image silently
* if there is more than one process.
- * JRM -- 11/8/16
*/
if (cache_ptr->aux_ptr) {
H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
@@ -2296,22 +2291,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__prep_for_file_close__compute_fd_heights
*
- * Purpose: Recent modifications to flush dependency support in the
- * metadata cache have removed the notion of flush dependency
- * height. This is a problem for the cache image feature,
- * as flush dependency height is used to order entries in the
- * cache image so that flush dependency parents appear before
- * flush dependency children. (Recall that the flush dependency
- * height of an entry in a flush dependency relationship is the
- * length of the longest path from the entry to a leaf entry --
- * that is an entry with flush dependency parents, but no
- * flush dependency children. With the introduction of the
- * possibility of multiple flush dependency parents, we have
- * a flush partial dependency latice, not a flush dependency
- * tree. But since the partial latice is acyclic, the concept
- * of flush dependency height still makes sense.
- *
- * The purpose of this function is to compute the flush
+ * Purpose:     Compute the flush
* dependency height of all entries that appear in the cache
* image.
*
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index cfd0780..f154c8a 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -402,22 +402,11 @@ done:
* shouldn't be used elsewhere.
*
* Return: Success: SUCCEED
- *
* Failure: FAIL
*
* Programmer: John Mainzer
* 3/17/10
*
- * Changes: With the slist optimization, the slist is not maintained
- * unless a flush is in progress. Thus we can not longer use
- * cache_ptr->slist_size to determine the total size of
- * the entries we must insert in the candidate list.
- *
- * To address this, we now use cache_ptr->dirty_index_size
- * instead.
- *
- * JRM -- 7/27/20
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -440,15 +429,14 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
- /* Recall that while we shouldn't have any protected entries at this
- * point, it is possible that some dirty entries may reside on the
- * pinned list at this point.
+ /* We shouldn't have any protected entries at this point, but it is
+ * possible that some dirty entries may reside on the pinned list.
*/
HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
HDassert((!cache_ptr->slist_enabled) ||
(cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
- if (space_needed > 0) { /* we have work to do */
+ if (space_needed > 0) {
H5C_cache_entry_t *entry_ptr;
unsigned nominated_entries_count = 0;
@@ -545,12 +533,6 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
- * Changes: With the slist optimization, the slist is not maintained
- * unless a flush is in progress. Updated sanity checks to
- * reflect this.
- *
- * JRM -- 7/27/20
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -785,14 +767,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
* resizes, or removals of other entries can occur as
* a side effect of the flush. Hence, there is no need
* for the checks for entry removal / status change
- * that I ported to H5C_apply_candidate_list().
+ * that are in H5C_apply_candidate_list().
*
- * However, if (in addition to allowing such operations
- * in the parallel case), we allow such operations outside
- * of the pre_serialize / serialize routines, this may
- * cease to be the case -- requiring a review of this
- * point.
- * JRM -- 4/7/15
*/
entries_cleared = 0;
entries_examined = 0;
@@ -1086,8 +1062,6 @@ done:
* Programmer: John Mainzer
* 2/10/17
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1464,12 +1438,8 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
- /* Add this entry to the list of entries to collectively write
- *
- * This comment is misleading -- the entry will be added to the
- * collective write list only if said list exists.
- *
- * JRM -- 2/9/17
+ /* Add this entry to the list of entries to collectively
+ * write, if the list exists.
*/
if (H5C__flush_single_entry(f, op_ptr, op_flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
@@ -1491,12 +1461,6 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
entry_ptr->is_protected || !entry_ptr->is_pinned)) {
/* Something has happened to the pinned entry list -- start
* over from the head.
- *
- * Recall that this code should be un-reachable at present,
- * as all the operations by entries on flush that could cause
- * it to be reachable are disallowed in the parallel case at
- * present. Hence the following assertion which should be
- * removed if the above changes.
*/
HDassert(!restart_scan);
@@ -1505,7 +1469,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
HDassert(!entry_ptr->is_protected);
HDassert(entry_ptr->is_pinned);
- HDassert(FALSE); /* see comment above */
+ /* This code should be un-reachable at present,
+ * as all the operations by entries on flush that could cause
+             * it to be reachable are disallowed in the parallel case.
+             * Hence the following assertion, which should be
+ * removed if the above changes.
+ */
+ HDassert(FALSE);
restart_scan = FALSE;
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 23c2b78..24c0263 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -83,89 +83,6 @@
* to the HGOTO_ERROR macro, which may not be appropriate in all cases.
* If so, we will need versions of the insertion and deletion macros which
* do not reference the sanity checking macros.
- * JRM - 5/5/04
- *
- * Changes:
- *
- * - Removed the line:
- *
- * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) ||
- *
- * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the
- * epoch markers used in the age out based cache size reduction algorithm,
- * this invariant need not hold, as the epoch markers are of size 0.
- *
- * One could argue that I should have given the epoch markers a positive
- * size, but this would break the index_size = LRU_list_size + pl_size
- * + pel_size invariant.
- *
- * Alternatively, I could pass the current decr_mode in to the macro,
- * and just skip the check whenever epoch markers may be in use.
- *
- * However, any size errors should be caught when the cache is flushed
- * and destroyed. Until we are tracking such an error, this should be
- * good enough.
- * JRM - 12/9/04
- *
- *
- * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines:
- *
- * ( ( (len) == 1 ) &&
- * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||
- * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
- * )
- * ) ||
- *
- * with:
- *
- * ( ( (len) == 1 ) &&
- * ( ( (head_ptr) != (tail_ptr) ) ||
- * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
- * )
- * ) ||
- *
- * Epoch markers have size 0, so we can now have a non-empty list with
- * zero size. Hence the "( (Size) <= 0 )" clause cause false failures
- * in the sanity check. Since "Size" is typically a size_t, it can't
- * take on negative values, and thus the revised clause "( (Size) < 0 )"
- * caused compiler warnings.
- * JRM - 12/22/04
- *
- * - In the H5C__DLL_SC macro, replaced the lines:
- *
- * ( ( (len) == 1 ) &&
- * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) ||
- * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
- * )
- * ) ||
- *
- * with
- *
- * ( ( (len) == 1 ) &&
- * ( ( (head_ptr) != (tail_ptr) ) ||
- * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
- * )
- * ) ||
- *
- * Epoch markers have size 0, so we can now have a non-empty list with
- * zero size. Hence the "( (Size) <= 0 )" clause cause false failures
- * in the sanity check. Since "Size" is typically a size_t, it can't
- * take on negative values, and thus the revised clause "( (Size) < 0 )"
- * caused compiler warnings.
- * JRM - 1/10/05
- *
- * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
- * sanity checking macros. These macro are used to update the size of
- * a DLL when one of its entries changes size.
- *
- * JRM - 9/8/05
- *
- * - Added macros supporting the index list -- a doubly liked list of
- * all entries in the index. This list is necessary to reduce the
- * cost of visiting all entries in the cache, which was previously
- * done via a scan of the hash table.
- *
- * JRM - 10/15/15
*
****************************************************************************/
@@ -966,28 +883,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
* When modifying these macros, remember to modify the similar macros
* in tst/cache.c
*
- * Changes:
- *
- * - Updated existing index macros and sanity check macros to maintain
- * the clean_index_size and dirty_index_size fields of H5C_t. Also
- * added macros to allow us to track entry cleans and dirties.
- *
- * JRM -- 11/5/08
- *
- * - Updated existing index macros and sanity check macros to maintain
- * the index_ring_len, index_ring_size, clean_index_ring_size, and
- * dirty_index_ring_size fields of H5C_t.
- *
- * JRM -- 9/1/15
- *
- * - Updated existing index macros and sanity checks macros to
- * maintain an doubly linked list of all entries in the index.
- * This is necessary to reduce the computational cost of visiting
- * all entries in the index, which used to be done by scanning
- * the hash table.
- *
- * JRM -- 10/15/15
- *
***********************************************************************/
/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. */
@@ -1518,9 +1413,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Skip list insertion and deletion macros:
*
- * These used to be functions, but I converted them to macros to avoid some
- * function call overhead.
- *
**************************************************************************/
/*-------------------------------------------------------------------------
@@ -1535,56 +1427,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/10/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function to set the in_tree flag when inserting
- * an entry into the tree. Also modified the function to
- * update the tree size and len fields instead of the similar
- * index fields.
- *
- * All of this is part of the modifications to support the
- * hash table.
- *
- * JRM -- 7/27/04
- * Converted the function H5C_insert_entry_in_tree() into
- * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of
- * wringing a little more speed out of the cache.
- *
- * Note that we don't bother to check if the entry is already
- * in the tree -- if it is, H5SL_insert() will fail.
- *
- * QAK -- 11/27/04
- * Switched over to using skip list routines.
- *
- * JRM -- 6/27/06
- * Added fail_val parameter.
- *
- * JRM -- 8/25/06
- * Added the H5C_DO_SANITY_CHECKS version of the macro.
- *
- * This version maintains the slist_len_increase and
- * slist_size_increase fields that are used in sanity
- * checks in the flush routines.
- *
- * All this is needed as the fractal heap needs to be
- * able to dirty, resize and/or move entries during the
- * flush.
- *
- * JRM -- 12/13/14
- * Added code to set cache_ptr->slist_changed to TRUE
- * when an entry is inserted in the slist.
- *
- * JRM -- 9/1/15
- * Added code to maintain the cache_ptr->slist_ring_len
- * and cache_ptr->slist_ring_size arrays.
- *
- * JRM -- 4/29/20
- * Reworked macro to support the slist_enabled field
- * of H5C_t. If slist_enabled == TRUE, the macro
- * functions as before. Otherwise, the macro is a no-op,
- * and the slist must be empty.
- *
*-------------------------------------------------------------------------
*/
@@ -1716,33 +1558,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/10/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * JRM - 7/27/04
- * Converted from the function H5C_remove_entry_from_tree()
- * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of
- * wringing a little more performance out of the cache.
- *
- * QAK -- 11/27/04
- * Switched over to using skip list routines.
- *
- * JRM -- 3/28/07
- * Updated sanity checks for the new is_read_only and
- * ro_ref_count fields in H5C_cache_entry_t.
- *
- * JRM -- 12/13/14
- * Added code to set cache_ptr->slist_changed to TRUE
- * when an entry is removed from the slist.
- *
- * JRM -- 4/29/20
- * Reworked macro to support the slist_enabled field
- * of H5C_t. If slist_enabled == TRUE, the macro
- * functions as before. Otherwise, the macro is a no-op,
- * and the slist must be empty.
- *
*-------------------------------------------------------------------------
*/
@@ -1853,33 +1668,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 9/07/05
*
- * Modifications:
- *
- * JRM -- 8/27/06
- * Added the H5C_DO_SANITY_CHECKS version of the macro.
- *
- * This version maintains the slist_size_increase field
- * that are used in sanity checks in the flush routines.
- *
- * All this is needed as the fractal heap needs to be
- * able to dirty, resize and/or move entries during the
- * flush.
- *
- * JRM -- 12/13/14
- * Note that we do not set cache_ptr->slist_changed to TRUE
- * in this case, as the structure of the slist is not
- * modified.
- *
- * JRM -- 9/1/15
- * Added code to maintain the cache_ptr->slist_ring_len
- * and cache_ptr->slist_ring_size arrays.
- *
- * JRM -- 4/29/20
- * Reworked macro to support the slist_enabled field
- * of H5C_t. If slist_enabled == TRUE, the macro
- * functions as before. Otherwise, the macro is a no-op,
- * and the slist must be empty.
- *
*-------------------------------------------------------------------------
*/
@@ -1976,9 +1764,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Replacement policy update macros:
*
- * These used to be functions, but I converted them to macros to avoid some
- * function call overhead.
- *
**************************************************************************/
/*-------------------------------------------------------------------------
@@ -2000,18 +1785,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 10/13/05
*
- * Modifications:
- *
- * JRM -- 3/20/06
- * Modified macro to ignore pinned entries. Pinned entries
- * do not appear in the data structures maintained by the
- * replacement policy code, and thus this macro has nothing
- * to do if called for such an entry.
- *
- * JRM -- 3/28/07
- * Added sanity checks using the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -2130,30 +1903,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/10/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_eviction() to the
- * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze
- * a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause difficulties with
- * the pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two version, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 3/20/06
- * Pinned entries can't be evicted, so this entry should never
- * be called on a pinned entry. Added assert to verify this.
- *
- * JRM -- 3/28/07
- * Added sanity checks for the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -2241,32 +1990,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/6/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_flush() to the
- * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze
- * a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause difficulties with
- * pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two versions, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 3/20/06
- * While pinned entries can be flushed, they don't reside in
- * the replacement policy data structures when unprotected.
- * Thus I modified this macro to do nothing if the entry is
- * pinned.
- *
- * JRM - 3/28/07
- * Added sanity checks based on the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -2499,34 +2222,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/17/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_insertion() to the
- * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze
- * a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause difficulties with
- * pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two version, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 3/10/06
- * This macro should never be called on a pinned entry.
- * Inserted an assert to verify this.
- *
- * JRM - 8/9/06
- * Not any more. We must now allow insertion of pinned
- * entries. Updated macro to support this.
- *
- * JRM - 3/28/07
- * Added sanity checks using the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -2637,31 +2332,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/17/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_protect() to the
- * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze
- * a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause difficulties with
- * pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two version, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 3/17/06
- * Modified macro to attempt to remove pinned entriese from
- * the pinned entry list instead of from the data structures
- * maintained by the replacement policy.
- *
- * JRM - 3/28/07
- * Added sanity checks based on the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -2927,12 +2597,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 8/23/06
*
- * Modifications:
- *
- * JRM -- 3/28/07
- * Added sanity checks based on the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -3060,12 +2724,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 3/22/06
*
- * Modifications:
- *
- * JRM -- 3/28/07
- * Added sanity checks based on the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
@@ -3181,27 +2839,6 @@ if ( ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/19/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_unprotect() to
- * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to
- * squeeze a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause difficulties with
- * pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two version, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 3/17/06
- * Modified macro to put pinned entries on the pinned entry
- * list instead of inserting them in the data structures
- * maintained by the replacement policy.
- *
*-------------------------------------------------------------------------
*/
@@ -3608,24 +3245,9 @@ typedef struct H5C_tag_info_t {
* While the cache was designed with multiple replacement policies in mind,
* at present only a modified form of LRU is supported.
*
- * JRM - 4/26/04
- *
- * Profiling has indicated that searches in the instance of H5TB_TREE are
- * too expensive. To deal with this issue, I have augmented the cache
- * with a hash table in which all entries will be stored. Given the
- * advantages of flushing entries in increasing address order, the TBBT
- * is retained, but only dirty entries are stored in it. At least for
- * now, we will leave entries in the TBBT after they are flushed.
- *
- * Note that index_size and index_len now refer to the total size of
- * and number of entries in the hash table.
- *
- * JRM - 7/19/04
- *
- * The TBBT has since been replaced with a skip list. This change
- * greatly predates this note.
- *
- * JRM - 9/26/05
+ * The cache has a hash table in which all entries are stored. Given the
+ * advantages of flushing entries in increasing address order, a skip list
+ * is used to track dirty entries.
*
* magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
* This field is used to validate pointers to instances of
@@ -3719,13 +3341,8 @@ typedef struct H5C_tag_info_t {
* The cache requires an index to facilitate searching for entries. The
* following fields support that index.
*
- * Addendum: JRM -- 10/14/15
- *
- * We sometimes need to visit all entries in the cache. In the past, this
- * was done by scanning the hash table. However, this is expensive, and
- * we have come to scan the hash table often enough that it has become a
- * performance issue. To repair this, I have added code to maintain a
- * list of all entries in the index -- call this list the index list.
+ * We sometimes need to visit all entries in the cache; they are stored in
+ * the index list.
*
* The index list is maintained by the same macros that maintain the
* index, and must have the same length and size as the index proper.
@@ -3759,12 +3376,10 @@ typedef struct H5C_tag_info_t {
* dirty_index_size == index_size.
*
* WARNING:
- *
- * The value of the clean_index_size must not be mistaken
- * for the current clean size of the cache. Rather, the
- * clean size of the cache is the current value of
- * clean_index_size plus the amount of empty space (if any)
- * in the cache.
+ * The value of the clean_index_size must not be mistaken for
+ * the current clean size of the cache. Rather, the clean size
+ * of the cache is the current value of clean_index_size plus
+ * the amount of empty space (if any) in the cache.
*
* clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
* maintain the sum of the sizes of all clean entries in the
@@ -3786,7 +3401,7 @@ typedef struct H5C_tag_info_t {
* H5C__HASH_TABLE_LEN. At present, this value is a power
* of two, not the usual prime number.
*
- * I hope that the variable size of cache elements, the large
+ * Hopefully the variable size of cache elements, the large
* hash table size, and the way in which HDF5 allocates space
* will combine to avoid problems with periodicity. If so, we
* can use a trivial hash function (a bit-and and a 3 bit left
@@ -3827,11 +3442,10 @@ typedef struct H5C_tag_info_t {
* This field is NULL if the index is empty.
*
*
- * With the addition of the take ownership flag, it is possible that
- * an entry may be removed from the cache as the result of the flush of
- * a second entry. In general, this causes little trouble, but it is
- * possible that the entry removed may be the next entry in the scan of
- * a list. In this case, we must be able to detect the fact that the
+ * It is possible that an entry may be removed from the cache as the result
+ * of the flush of a second entry. In general, this causes little trouble,
+ * but it is possible that the entry removed may be the next entry in the
+ * scan of a list. In this case, we must be able to detect the fact that the
* entry has been removed, so that the scan doesn't attempt to proceed with
* an entry that is no longer in the cache.
*
@@ -3859,29 +3473,19 @@ typedef struct H5C_tag_info_t {
* one.
*
* entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
- * which contains the 'next' entry for an iteration. Removing
- * this entry must trigger a rescan of the iteration, so each
- * entry removed from the cache is compared against this pointer
- * and the pointer is reset to NULL if the watched entry is
- * removed.
- * (This functions similarly to a "dead man's switch")
+ * which contains the 'next' entry for an iteration. Removing
+ * this entry must trigger a rescan of the iteration, so each
+ * entry removed from the cache is compared against this pointer
+ * and the pointer is reset to NULL if the watched entry is
+ * removed. (This functions similarly to a "dead man's switch")
*
*
* When we flush the cache, we need to write entries out in increasing
* address order. An instance of a skip list is used to store dirty entries in
- * sorted order. Whether it is cheaper to sort the dirty entries as needed,
- * or to maintain the list is an open question. At a guess, it depends
- * on how frequently the cache is flushed. We will see how it goes.
- *
- * For now at least, I will not remove dirty entries from the list as they
- * are flushed. (this has been changed -- dirty entries are now removed from
- * the skip list as they are flushed. JRM - 10/25/05)
- *
- * Update 4/21/20:
+ * sorted order.
*
- * Profiling indicates that the cost of maintaining the skip list is
- * significant. As it is only used on flush and close, maintaining it
- * only when needed is an obvious optimization.
+ * The cost of maintaining the skip list is significant. As it is only used
+ * on flush and close, it is maintained only when needed.
*
* To do this, we add a flag to control maintenanace of the skip list.
* This flag is initially set to FALSE, which disables all operations
@@ -3940,30 +3544,21 @@ typedef struct H5C_tag_info_t {
* order, which results in significant savings.
*
* b) It facilitates checking for adjacent dirty entries when
- * attempting to evict entries from the cache. While we
- * don't use this at present, I hope that this will allow
- * some optimizations when I get to it.
+ * attempting to evict entries from the cache.
*
* num_last_entries: The number of entries in the cache that can only be
* flushed after all other entries in the cache have
- * been flushed. At this time, this will only ever be
- * one entry (the superblock), and the code has been
- * protected with HDasserts to enforce this. This restraint
- * can certainly be relaxed in the future if the need for
- * multiple entries being flushed last arises, though
- * explicit tests for that case should be added when said
- * HDasserts are removed.
- *
- * Update: There are now two possible last entries
- * (superblock and file driver info message). This
- * number will probably increase as we add superblock
- * messages. JRM -- 11/18/14
- *
- * With the addition of the fractal heap, the cache must now deal with
- * the case in which entries may be dirtied, moved, or have their sizes
- * changed during a flush. To allow sanity checks in this situation, the
- * following two fields have been added. They are only compiled in when
- * H5C_DO_SANITY_CHECKS is TRUE.
+ * been flushed.
+ *
+ *              Note: At this time, this field will only be applied to
+ * two types of entries: the superblock and the file driver info
+ * message. The code utilizing these flags is protected with
+ * HDasserts to enforce this.
+ *
+ * The cache must deal with the case in which entries may be dirtied, moved,
+ * or have their sizes changed during a flush. To allow sanity checks in this
+ * situation, the following two fields have been added. They are only
+ * compiled in when H5C_DO_SANITY_CHECKS is TRUE.
*
* slist_len_increase: Number of entries that have been added to the
* slist since the last time this field was set to zero.
@@ -4020,8 +3615,8 @@ typedef struct H5C_tag_info_t {
*
*
* For very frequently used entries, the protect/unprotect overhead can
- * become burdensome. To avoid this overhead, I have modified the cache
- * to allow entries to be "pinned". A pinned entry is similar to a
+ * become burdensome. To avoid this overhead, the cache
+ * allows entries to be "pinned". A pinned entry is similar to a
* protected entry, in the sense that it cannot be evicted, and that
* the entry can be modified at any time.
*
@@ -4072,29 +3667,15 @@ typedef struct H5C_tag_info_t {
* The cache must have a replacement policy, and the fields supporting this
* policy must be accessible from this structure.
*
- * While there has been interest in several replacement policies for
- * this cache, the initial development schedule is tight. Thus I have
- * elected to support only a modified LRU (least recently used) policy
- * for the first cut.
- *
- * To further simplify matters, I have simply included the fields needed
- * by the modified LRU in this structure. When and if we add support for
- * other policies, it will probably be easiest to just add the necessary
- * fields to this structure as well -- we only create one instance of this
- * structure per file, so the overhead is not excessive.
- *
- *
* Fields supporting the modified LRU policy:
*
- * See most any OS text for a discussion of the LRU replacement policy.
- *
* When operating in parallel mode, we must ensure that a read does not
* cause a write. If it does, the process will hang, as the write will
* be collective and the other processes will not know to participate.
*
- * To deal with this issue, I have modified the usual LRU policy by adding
+ * To deal with this issue, the usual LRU policy has been modified by adding
* clean and dirty LRU lists to the usual LRU list. In general, these
- * lists are only exist in parallel builds.
+ * lists only exist in parallel builds.
*
* The clean LRU list is simply the regular LRU list with all dirty cache
* entries removed.
@@ -4191,7 +3772,7 @@ typedef struct H5C_tag_info_t {
* While the default cache size is adequate for most cases, we can run into
* cases where the default is too small. Ideally, we will let the user
* adjust the cache size as required. However, this is not possible in all
- * cases. Thus I have added automatic cache size adjustment code.
+ * cases, so the cache has automatic cache size adjustment code.
*
* The configuration for the automatic cache size adjustment is stored in
* the structure described below:
@@ -4222,10 +3803,9 @@ typedef struct H5C_tag_info_t {
*
* resize_enabled: This is another convenience flag which is set whenever
* a new set of values for resize_ctl are provided. Very
- * simply,
+ * simply:
*
- * resize_enabled = size_increase_possible ||
- * size_decrease_possible;
+ * resize_enabled = size_increase_possible || size_decrease_possible;
*
* cache_full: Boolean flag used to keep track of whether the cache is
* full, so we can refrain from increasing the size of a
@@ -4248,11 +3828,6 @@ typedef struct H5C_tag_info_t {
* and to prevent the infinite recursion that would otherwise
* occur.
*
- * Note that this issue is not hypothetical -- this field
- * was added 12/29/15 to fix a bug exposed in the testing
- * of changes to the file driver info superblock extension
- * management code needed to support rings.
- *
* msic_in_progress: As the metadata cache has become re-entrant, and as
* the free space manager code has become more tightly
* integrated with the metadata cache, it is possible that
@@ -4265,11 +3840,6 @@ typedef struct H5C_tag_info_t {
* and prevent the infinite regression that would otherwise
* occur.
*
- * Note that this is issue is not hypothetical -- this field
- * was added 2/16/17 to address this issue when it was
- * exposed by modifications to test/fheap.c to cause it to
- * use paged allocation.
- *
* resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
* data for automatic cache resizing.
*
@@ -4362,8 +3932,8 @@ typedef struct H5C_tag_info_t {
* call to H5C_protect().
*
* image_loaded: Boolean flag indicating that the metadata cache has
- * loaded the metadata cache image as directed by the
- * MDC cache image superblock extension message.
+ * loaded the metadata cache image as directed by the
+ * MDC cache image superblock extension message.
*
* delete_image: Boolean flag indicating whether the metadata cache image
* superblock message should be deleted and the cache image
@@ -4476,11 +4046,11 @@ typedef struct H5C_tag_info_t {
* free space manager metadata.
*
* mdfsm_settled: Boolean flag indicating whether the meta data free space
- * manager is settled -- i.e. whether the correct space has
- * been allocated for it in the file.
+ * manager is settled -- i.e. whether the correct space has
+ * been allocated for it in the file.
*
- * Note that the name of this field is deceptive. In the
- * multi file case, the flag applies only to free space
+ * Note that the name of this field is deceptive. In the
+ * multi-file case, the flag applies only to free space
* managers that are involved in allocating space for free
* space managers.
*
@@ -4699,16 +4269,16 @@ typedef struct H5C_tag_info_t {
* close, this field should only be set at that time.
*
* images_read: Integer field containing the number of cache images
- * read from file. Note that reading an image is different
- * from loading it -- reading the image means just that,
- * while loading the image refers to decoding it and loading
- * it into the metadata cache.
+ * read from file. Note that reading an image is different
+ * from loading it -- reading the image means just that,
+ * while loading the image refers to decoding it and loading
+ * it into the metadata cache.
*
- * In the serial case, image_read should always equal
- * images_loaded. However, in the parallel case, the
- * image should only be read by process 0. All other
- * processes should receive the cache image via a broadcast
- * from process 0.
+ * In the serial case, image_read should always equal
+ * images_loaded. However, in the parallel case, the
+ * image should only be read by process 0. All other
+ * processes should receive the cache image via a broadcast
+ * from process 0.
*
* images_loaded: Integer field containing the number of cache images
* loaded since the last time statistics were reset.
@@ -4719,21 +4289,19 @@ typedef struct H5C_tag_info_t {
* should only change on those events.
*
* last_image_size: Size of the most recently loaded metadata cache image
- * loaded into the cache, or zero if no image has been
- * loaded.
+ * loaded into the cache, or zero if no image has been loaded.
*
- * At present, at most one cache image can be loaded into
- * the metadata cache for any given file, and this image
- * will be loaded either on the first protect, or on file
- * close if no entry is protected before then.
+ * At present, at most one cache image can be loaded into
+ * the metadata cache for any given file, and this image
+ * will be loaded either on the first protect, or on file
+ * close if no entry is protected before then.
*
*
* Fields for tracking prefetched entries. Note that flushes and evictions
* of prefetched entries are tracked in the flushes and evictions arrays
* discussed above.
*
- * prefetches: Number of prefetched entries that are loaded to the
- * cache.
+ * prefetches: Number of prefetched entries that are loaded to the cache.
*
* dirty_prefetches: Number of dirty prefetched entries that are loaded
* into the cache.
@@ -4741,9 +4309,9 @@ typedef struct H5C_tag_info_t {
* prefetch_hits: Number of prefetched entries that are actually used.
*
*
- * As entries are now capable of moving, loading, dirtying, and deleting
- * other entries in their pre_serialize and serialize callbacks, it has
- * been necessary to insert code to restart scans of lists so as to avoid
+ * Since entries may move, load, dirty, and delete
+ * other entries in their pre_serialize and serialize callbacks, there is
+ * code to restart scans of lists so as to avoid
* improper behavior if the next entry in the list is the target of one on
* these operations.
*
@@ -4757,9 +4325,9 @@ typedef struct H5C_tag_info_t {
* entry in the scan.
*
* LRU_scan_restarts: Number of times a scan of the LRU list (that contains
- * calls to H5C__flush_single_entry()) has been restarted to
- * avoid potential issues with change of status of the next
- * entry in the scan.
+ * calls to H5C__flush_single_entry()) has been restarted to
+ * avoid potential issues with change of status of the next
+ * entry in the scan.
*
* index_scan_restarts: Number of times a scan of the index has been
* restarted to avoid potential issues with load, insertion
@@ -4794,14 +4362,14 @@ typedef struct H5C_tag_info_t {
* flushed in the current epoch.
*
* max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum size of any single entry
+ * are used to record the maximum size of any single entry
* with type id equal to the array index that has resided in
* the cache in the current epoch.
*
* max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
- * entry with type id equal to the array index that has been
- * marked as pinned in the cache in the current epoch.
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index that has been
+ * marked as pinned in the cache in the current epoch.
*
*
* Fields supporting testing:
@@ -4811,9 +4379,9 @@ typedef struct H5C_tag_info_t {
* the processes mpi rank.
*
* get_entry_ptr_from_addr_counter: Counter used to track the number of
- * times the H5C_get_entry_ptr_from_addr() function has been
- * called successfully. This field is only defined when
- * NDEBUG is not #defined.
+ * times the H5C_get_entry_ptr_from_addr() function has been
+ * called successfully. This field is only defined when
+ * NDEBUG is not #defined.
*
****************************************************************************/
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 6a661d2..949c3e1 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -42,8 +42,6 @@
/* This sanity checking constant was picked out of the air. Increase
* or decrease it if appropriate. Its purposes is to detect corrupt
* object sizes, so it probably doesn't matter if it is a bit big.
- *
- * JRM - 5/17/04
*/
#define H5C_MAX_ENTRY_SIZE ((size_t)(32 * 1024 * 1024))
@@ -978,8 +976,6 @@ typedef int H5C_ring_t;
*
* The fields of this structure are discussed individually below:
*
- * JRM - 4/26/04
- *
* magic: Unsigned 32 bit integer that must always be set to
* H5C__H5C_CACHE_ENTRY_T_MAGIC when the entry is valid.
* The field must be set to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC
@@ -1100,15 +1096,9 @@ typedef int H5C_ring_t;
* be unpinned (and possibly unprotected) during the
* flush.
*
- * JRM -- 3/16/06
- *
* in_slist: Boolean flag indicating whether the entry is in the skip list
- * As a general rule, entries are placed in the list when they
- * are marked dirty. However they may remain in the list after
- * being flushed.
- *
- * Update: Dirty entries are now removed from the skip list
- * when they are flushed.
+ * As a general rule, entries are placed in the list when they are
+ * marked dirty.
*
* flush_marker: Boolean flag indicating that the entry is to be flushed
* the next time H5C_flush_cache() is called with the
@@ -1116,24 +1106,13 @@ typedef int H5C_ring_t;
* the entry is flushed for whatever reason.
*
* flush_me_last: Boolean flag indicating that this entry should not be
- * flushed from the cache until all other entries without
- * the flush_me_last flag set have been flushed.
- *
- * Note:
+ * flushed from the cache until all other entries without the
+ * flush_me_last flag set have been flushed.
*
- * At this time, the flush_me_last
- * flag will only be applied to one entry, the superblock,
- * and the code utilizing these flags is protected with HDasserts
- * to enforce this. This restraint can certainly be relaxed in
- * the future if the need for multiple entries getting flushed
- * last or collectively arises, though the code allowing for that
- * will need to be expanded and tested appropriately if that
- * functionality is desired.
- *
- * Update: There are now two possible last entries
- * (superblock and file driver info message). This
- * number will probably increase as we add superblock
- * messages. JRM -- 11/18/14
+ * Note: At this time, the flush_me_last flag will only be applied to
+ * two types of entries: the superblock and the file driver info
+ * message. The code utilizing these flags is protected with
+ * HDasserts to enforce this.
*
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
* to implement the metadata cache In the parallel case, only
@@ -1228,8 +1207,6 @@ typedef int H5C_ring_t;
* If there are multiple entries in any hash bin, they are stored in a doubly
* linked list.
*
- * Addendum: JRM -- 10/14/15
- *
* We have come to scan all entries in the cache frequently enough that
* the cost of doing so by scanning the hash table has become unacceptable.
* To reduce this cost, the index now also maintains a doubly linked list
@@ -1691,8 +1668,6 @@ typedef struct H5C_cache_entry_t {
*
* The fields of this structure are discussed individually below:
*
- * JRM - 8/5/15
- *
* magic: Unsigned 32 bit integer that must always be set to
* H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid.
* The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC
@@ -1855,7 +1830,7 @@ typedef struct H5C_image_entry_t {
* H5C_auto_size_ctl_t passed to the cache must have a known
* version number, or an error will be flagged.
*
- * report_fcn: Pointer to the function that is to be called to report
+ * rpt_fcn: Pointer to the function that is to be called to report
* activities each time the auto cache resize code is executed. If the
* field is NULL, no call is made.
*
@@ -1978,10 +1953,6 @@ typedef struct H5C_image_entry_t {
* performance, however the above flash increment algorithm will not be
* triggered.
*
- * Hopefully, the add space algorithm detailed above will be sufficient
- * for the performance problems encountered to date. However, we should
- * expect to revisit the issue.
- *
* flash_multiple: Double containing the multiple described above in the
* H5C_flash_incr__add_space section of the discussion of the
* flash_incr_mode section. This field is ignored unless flash_incr_mode
@@ -2048,8 +2019,8 @@ typedef struct H5C_image_entry_t {
* The field is a double containing the multiplier used to derive the
* new cache size from the old if a cache size decrement is triggered.
* The decrement must be in the range 0.0 (in which case the cache will
- * try to contract to its minimum size) to 1.0 (in which case the
- * cache will never shrink).
+ * try to contract to its minimum size) to 1.0 (in which case the
+ * cache will never shrink).
*
* apply_max_decrement: Boolean flag used to determine whether decrements
* in cache size are to be limited by the max_decrement field.
diff --git a/src/H5FD.c b/src/H5FD.c
index fd82217..9de4ad9 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -1501,8 +1501,6 @@ done:
*
* Programmer: JRM -- 6/10/20
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1581,8 +1579,6 @@ done:
*
* Programmer: JRM -- 6/10/20
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1678,8 +1674,6 @@ done:
*
* Programmer: NAF -- 5/19/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1779,8 +1773,6 @@ done:
*
* Programmer: NAF -- 5/14/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5FDint.c b/src/H5FDint.c
index e1cb5ff..c5b8713 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -346,8 +346,6 @@ done:
*
* Programmer: JRM -- 6/10/20
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -558,8 +556,6 @@ done:
*
* Programmer: JRM -- 6/10/20
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -742,8 +738,6 @@ done:
*
* Programmer: NAF -- 5/13/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1066,8 +1060,6 @@ done:
*
* Programmer: NAF -- 3/29/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1232,8 +1224,6 @@ done:
*
* Programmer: NAF -- 5/19/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1387,8 +1377,6 @@ done:
*
* Programmer: NAF -- 5/13/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1709,8 +1697,6 @@ done:
*
* Programmer: NAF -- 3/29/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1867,8 +1853,6 @@ done:
*
* Programmer: NAF -- 5/19/21
*
- * Changes: None
- *
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c
index e71c739..b7b7489 100644
--- a/src/H5FDmpi.c
+++ b/src/H5FDmpi.c
@@ -40,16 +40,6 @@
* Programmer: Quincey Koziol
* Friday, January 30, 2004
*
- * Changes: Reworked function to use the ctl callback so we can get
- * rid of H5FD_class_mpi_t. Since there are no real limits
- * on what the ctl callback can do, its file parameter can't
- * be constant. Thus, I had to remove the const qualifier
- * on this functions file parameter as well. Note also the
- * circumlocution required to use the ctl callbacks output
- * parameter to pass back the rank without introducing
- * compiler warnings.
- * JRM -- 8/13/21
- *
*-------------------------------------------------------------------------
*/
int
@@ -92,16 +82,6 @@ done:
* Programmer: Quincey Koziol
* Friday, January 30, 2004
*
- * Changes: Reworked function to use the ctl callback so we can get
- * rid of H5FD_class_mpi_t. Since there are no real limits
- * on what the ctl callback can do, its file parameter can't
- * be constant. Thus, I had to remove the const qualifier
- * on this functions file parameter as well. Note also the
- * circumlocution required to use the ctl callbacks output
- * parameter to pass back the rank without introducing
- * compiler warnings.
- * JRM -- 8/13/21
- *
*-------------------------------------------------------------------------
*/
int
@@ -145,16 +125,6 @@ done:
* Programmer: Quincey Koziol
* Friday, January 30, 2004
*
- * Changes: Reworked function to use the ctl callback so we can get
- * rid of H5FD_class_mpi_t. Since there are no real limits
- * on what the ctl callback can do, its file parameter can't
- * be constant. Thus, I had to remove the const qualifier
- * on this functions file parameter as well. Note also the
- * circumlocution required to use the ctl callbacks output
- * parameter to pass back the rank without introducing
- * compiler warnings.
- * JRM -- 8/13/21
- *
*-------------------------------------------------------------------------
*/
MPI_Comm
diff --git a/src/H5FDsubfiling/H5FDioc_int.c b/src/H5FDsubfiling/H5FDioc_int.c
index 42f088e..ce5a000 100644
--- a/src/H5FDsubfiling/H5FDioc_int.c
+++ b/src/H5FDsubfiling/H5FDioc_int.c
@@ -91,7 +91,6 @@ cast_to_void(const void *data)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
herr_t
@@ -263,7 +262,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5FDsubfiling/H5FDioc_threads.c b/src/H5FDsubfiling/H5FDioc_threads.c
index fd6fc01..abf816d 100644
--- a/src/H5FDsubfiling/H5FDioc_threads.c
+++ b/src/H5FDsubfiling/H5FDioc_threads.c
@@ -105,8 +105,6 @@ static void ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_r
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
int
@@ -264,8 +262,6 @@ finalize_ioc_threads(void *_sf_context)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static HG_THREAD_RETURN_TYPE
@@ -339,7 +335,6 @@ ioc_thread_main(void *arg)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
static int
@@ -493,8 +488,6 @@ translate_opcode(io_op_t op)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static HG_THREAD_RETURN_TYPE
@@ -591,8 +584,6 @@ handle_work_request(void *arg)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
void
@@ -612,8 +603,6 @@ H5FD_ioc_begin_thread_exclusive(void)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
void
@@ -679,8 +668,6 @@ from the thread pool threads...
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static int
@@ -870,8 +857,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static int
@@ -1217,8 +1202,6 @@ done:
* Programmer: John Mainzer
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
@@ -1283,8 +1266,6 @@ done:
*
* Programmer: JRM -- 11/6/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static ioc_io_queue_entry_t *
@@ -1338,8 +1319,6 @@ ioc_io_queue_alloc_entry(void)
*
* Programmer: JRM -- 11/7/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static void
@@ -1461,8 +1440,6 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
*
* Programmer: JRM -- 11/7/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
/* TODO: Keep an eye on statistics and optimize this algorithm if necessary. While it is O(N)
@@ -1629,8 +1606,6 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
*
* Programmer: JRM -- 11/7/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static void
@@ -1715,8 +1690,6 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
*
* Programmer: JRM -- 11/6/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
static void
diff --git a/src/H5FDsubfiling/H5FDsubfile_int.c b/src/H5FDsubfiling/H5FDsubfile_int.c
index d4aef35..4c583e8 100644
--- a/src/H5FDsubfiling/H5FDsubfile_int.c
+++ b/src/H5FDsubfiling/H5FDsubfile_int.c
@@ -65,8 +65,6 @@
*
* Programmer: JRM -- 12/13/21
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -280,8 +278,6 @@ done:
*
* Programmer: JRM -- 1/18/22
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c
index afdf073..e086190 100644
--- a/src/H5FDsubfiling/H5FDsubfiling.c
+++ b/src/H5FDsubfiling/H5FDsubfiling.c
@@ -429,8 +429,6 @@ done:
* Programmer: John Mainzer
* 9/10/17
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -945,8 +943,6 @@ done:
* Programmer: John Mainzer
* 9/8/17
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void *
@@ -1028,8 +1024,6 @@ done:
* Programmer: John Mainzer
* 9/8/17
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void *
@@ -1072,8 +1066,6 @@ done:
* Programmer: John Mainzer
* 9/8/17
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -2055,8 +2047,6 @@ done:
*
* Programmer: RAW -- ??/??/21
*
- * Changes: None.
- *
* Notes: Thus function doesn't actually implement vector read.
* Instead, it comverts the vector read call into a series
* of scalar read calls. Fix this when time permits.
@@ -2219,8 +2209,6 @@ done:
*
* Programmer: RAW -- ??/??/21
*
- * Changes: None.
- *
* Notes: Thus function doesn't actually implement vector write.
* Instead, it comverts the vector write call into a series
* of scalar read calls. Fix this when time permits.
diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c
index e4dcf25..58f3643 100644
--- a/src/H5FDsubfiling/H5subfiling_common.c
+++ b/src/H5FDsubfiling/H5subfiling_common.c
@@ -596,7 +596,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
herr_t
@@ -1709,7 +1708,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1898,7 +1896,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1961,8 +1958,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -2038,8 +2033,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static void
@@ -2091,8 +2084,6 @@ clear_fid_map_entry(uint64_t file_id, int64_t sf_context_id)
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -2653,7 +2644,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
@@ -2678,7 +2668,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
*-------------------------------------------------------------------------
*/
herr_t
@@ -2980,8 +2969,6 @@ done:
* Programmer: Richard Warren
* 7/17/2020
*
- * Changes: Initial Version/None.
- *
*-------------------------------------------------------------------------
*/
int64_t
diff --git a/src/H5detect.c b/src/H5detect.c
index a491343..daf7708 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -839,7 +839,7 @@ bit.\n";
fprintf(rawoutstream, " *\t\t\t");
}
- fprintf(rawoutstream, " *\n * Modifications:\n *\n");
+ fprintf(rawoutstream, " *\n");
fprintf(rawoutstream, " *\tDO NOT MAKE MODIFICATIONS TO THIS FILE!\n");
fprintf(rawoutstream, " *\tIt was generated by code in `H5detect.c'.\n");
diff --git a/src/H5make_libsettings.c b/src/H5make_libsettings.c
index a428062..88a6d7d 100644
--- a/src/H5make_libsettings.c
+++ b/src/H5make_libsettings.c
@@ -230,7 +230,7 @@ information about the library build configuration\n";
HDfprintf(rawoutstream, " *\t\t\t");
}
- HDfprintf(rawoutstream, " *\n * Modifications:\n *\n");
+ HDfprintf(rawoutstream, " *\n");
HDfprintf(rawoutstream, " *\tDO NOT MAKE MODIFICATIONS TO THIS FILE!\n");
HDfprintf(rawoutstream, " *\tIt was generated by code in `H5make_libsettings.c'.\n");