From 030543bf0df05153d4189bc8556a6f8506cb0cff Mon Sep 17 00:00:00 2001
From: John Mainzer
Date: Mon, 9 Apr 2007 13:58:42 -0500
Subject: [svn-r13618] Modified metadata cache to support multiple read only
 protects of cache entries.  Added test code to test the new feature.

Also some minor cleanup.

h5committested -- passed on copper and sol.  Failed on osage with a
configuration error that appears unrelated to my changes.  Serial test
(debug mode) passes on Phoenix (Linux x86 2.6 kernel), so I went ahead
with the checkin.
---
 src/H5AC.c          |  41 ++-
 src/H5C.c           | 714 ++++++++++++++++++++++++++++++++++++----------------
 src/H5Cpkg.h        |  22 ++
 src/H5Cprivate.h    |  43 +++-
 test/cache.c        | 666 +++++++++++++++++++++++++++++++++++++++++++++++-
 test/cache_common.c | 307 +++++++++++++++++++++-
 test/cache_common.h |  12 +
 7 files changed, 1579 insertions(+), 226 deletions(-)

diff --git a/src/H5AC.c b/src/H5AC.c
index 1a32623..720e663 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -1872,6 +1872,14 @@ done:
  *		JRM - 6/6/06
  *		Added trace file support.
  *
+ *		JRM - 3/18/07
+ *		Modified code to support the new flags parameter for
+ *		H5C_protect().  For now, that means passing in the
+ *		H5C__READ_ONLY_FLAG if rw == H5AC_READ.
+ *
+ *		Also updated the trace file output to save the
+ *		rw parameter, since we are now doing something with it.
+ *
  *-------------------------------------------------------------------------
  */
 void *
@@ -1881,9 +1889,10 @@ H5AC_protect(H5F_t *f,
             haddr_t addr,
             const void *udata1,
             void *udata2,
-            H5AC_protect_t UNUSED rw)
+            H5AC_protect_t rw)
 {
     /* char *	fcn_name = "H5AC_protect"; */
+    unsigned		protect_flags = H5C__NO_FLAGS_SET;
     void *		thing = (void *)NULL;
     void *		ret_value;      /* Return value */
 #if H5AC__TRACE_FILE_ENABLED
@@ -1915,12 +1924,33 @@ H5AC_protect(H5F_t *f,
          ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0 ) &&
          ( trace_file_ptr != NULL ) ) {

-        sprintf(trace, "H5AC_protect %lx %d",
+        char * rw_string;
+
+        if ( rw == H5AC_WRITE ) {
+
+            rw_string = "H5AC_WRITE";
+
+        } else if ( rw == H5AC_READ ) {
+
+            rw_string = "H5AC_READ";
+
+        } else {
+
+            rw_string = "???";
+        }
+
+        sprintf(trace, "H5AC_protect %lx %d %s",
                 (unsigned long)addr,
-                (int)(type->id));
+                (int)(type->id),
+                rw_string);
     }
 #endif /* H5AC__TRACE_FILE_ENABLED */

+    if ( rw == H5AC_READ ) {
+
+        protect_flags |= H5C__READ_ONLY_FLAG;
+    }
+
     thing = H5C_protect(f,
                         dxpl_id,
                         H5AC_noblock_dxpl_id,
@@ -1928,7 +1958,8 @@ H5AC_protect(H5F_t *f,
                         type,
                         addr,
                         udata1,
-                        udata2);
+                        udata2,
+                        protect_flags);

     if ( thing == NULL ) {

@@ -2276,7 +2307,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
          ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0 ) &&
          ( trace_file_ptr != NULL ) ) {

-        sprintf(trace, "H5AC_protect %lx %d",
+        sprintf(trace, "H5AC_unprotect %lx %d",
                 (unsigned long)addr,
                 (int)(type->id));

diff --git a/src/H5C.c b/src/H5C.c
index 4410aa4..08ef464 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -537,6 +537,10 @@ if ( ( (entry_ptr) == NULL ) || \
 *		JRM -- 8/9/06
 *		More pinned entry stats related updates.
 *
+ *		JRM -- 3/31/07
+ *		Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on
+ *		read and write protects.
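+ *
+ *		As a rough illustration of the new stats (hypothetical
+ *		counts, assuming stats collection is enabled): two
+ *		overlapping read only protects of an entry of type id T,
+ *		followed by a single write protect, should leave
+ *
+ *			write_protects[T]    == 1
+ *			read_protects[T]     == 2
+ *			max_read_protects[T] == 2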
+ * ***********************************************************************/ #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ @@ -686,24 +690,34 @@ if ( ( (entry_ptr) == NULL ) || \ = (entry_ptr)->size; \ } -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ - else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ - (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ - = (entry_ptr)->size; \ - } \ +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ + if ( hit ) \ + ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ + else \ + ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ + if ( ! ((entry_ptr)->is_read_only) ) { \ + ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ + } else { \ + ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ + if ( ((entry_ptr)->ro_ref_count) > \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ + ((entry_ptr)->ro_ref_count); \ + } \ + } \ + if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ + (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ + if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ + (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ + if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + if ( (entry_ptr)->size > \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ + = (entry_ptr)->size; \ + } \ ((entry_ptr)->accesses)++; #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ @@ -752,18 +766,28 @@ if ( ( (entry_ptr) == NULL ) || \ if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ - else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ - (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ + if ( hit ) \ + ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ + else \ + ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ + if ( ! 
((entry_ptr)->is_read_only) ) { \ + ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ + } else { \ + ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ + if ( ((entry_ptr)->ro_ref_count) > \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ + ((entry_ptr)->ro_ref_count); \ + } \ + } \ + if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ + (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ + if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ + (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ + if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ @@ -1184,6 +1208,10 @@ if ( ( (cache_ptr) == NULL ) || \ * QAK -- 11/27/04 * Switched over to using skip list routines. * + * JRM -- 3/28/07 + * Updated sanity checks for the new is_read_only and + * ro_ref_count fields in H5C_cache_entry_t. + * *------------------------------------------------------------------------- */ @@ -1193,6 +1221,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ HDassert( (entry_ptr)->in_slist ); \ HDassert( (cache_ptr)->slist_ptr ); \ @@ -1321,6 +1351,10 @@ if ( ( (cache_ptr) == NULL ) || \ * replacement policy code, and thus this macro has nothing * to do if called for such an entry. * + * JRM -- 3/28/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * *------------------------------------------------------------------------- */ @@ -1332,6 +1366,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -1393,6 +1429,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -1455,6 +1493,10 @@ if ( ( (cache_ptr) == NULL ) || \ * Pinned entries can't be evicted, so this entry should never * be called on a pinned entry. Added assert to verify this. * + * JRM -- 3/28/07 + * Added sanity checks for the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
+ * *------------------------------------------------------------------------- */ @@ -1466,6 +1508,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ @@ -1506,6 +1550,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( !((entry_ptr)->is_pinned) ); \ HDassert( (entry_ptr)->size > 0 ); \ \ @@ -1560,6 +1606,10 @@ if ( ( (cache_ptr) == NULL ) || \ * Thus I modified this macro to do nothing if the entry is * pinned. * + * JRM - 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * *------------------------------------------------------------------------- */ @@ -1571,6 +1621,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -1631,6 +1683,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -1698,6 +1752,10 @@ if ( ( (cache_ptr) == NULL ) || \ * Not any more. We must now allow insertion of pinned * entries. Updated macro to support this. * + * JRM - 3/28/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * *------------------------------------------------------------------------- */ @@ -1709,6 +1767,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( (entry_ptr)->is_pinned ) { \ @@ -1757,6 +1817,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( (entry_ptr)->is_pinned ) { \ @@ -1825,6 +1887,10 @@ if ( ( (cache_ptr) == NULL ) || \ * the pinned entry list instead of from the data structures * maintained by the replacement policy. * + * JRM - 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
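+ *
+ *		The reasoning behind the new checks, in sketch form:
+ *		is_read_only can only be TRUE while is_protected is TRUE,
+ *		and protected entries are held on the protected list, not
+ *		in the replacement policy data structures.  Thus any entry
+ *		passed to this macro should satisfy
+ *
+ *			!((entry_ptr)->is_read_only) &&
+ *			((entry_ptr)->ro_ref_count) == 0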
+ * *------------------------------------------------------------------------- */ @@ -1836,6 +1902,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( (entry_ptr)->is_pinned ) { \ @@ -1896,6 +1964,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( (entry_ptr)->is_pinned ) { \ @@ -1981,6 +2051,10 @@ if ( ( (cache_ptr) == NULL ) || \ * in the replacement policy data structures, so there is * nothing to be done. * + * JRM - 3/28/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * *------------------------------------------------------------------------- */ @@ -1992,6 +2066,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -2060,6 +2136,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ if ( ! ((entry_ptr)->is_pinned) ) { \ @@ -2111,7 +2189,9 @@ if ( ( (cache_ptr) == NULL ) || \ * * Modifications: * - * None. + * JRM -- 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. * *------------------------------------------------------------------------- */ @@ -2124,6 +2204,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ HDassert( new_size > 0 ); \ \ @@ -2178,6 +2260,8 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ HDassert( new_size > 0 ); \ \ @@ -2229,7 +2313,9 @@ if ( ( (cache_ptr) == NULL ) || \ * * Modifications: * - * None. + * JRM -- 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. * *------------------------------------------------------------------------- */ @@ -2241,7 +2327,9 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( ! 
((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->is_pinned); \ HDassert( (entry_ptr)->size > 0 ); \ \ @@ -2291,7 +2379,9 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( ! ((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->is_pinned); \ HDassert( (entry_ptr)->size > 0 ); \ \ @@ -2741,6 +2831,10 @@ done: * for sanity checking in the flush process, and are not * compiled in unless H5C_DO_SANITY_CHECKS is TRUE. * + * JRM -- 3/28/07 + * Added initialization for the new is_read_only and + * ro_ref_count fields. + * *------------------------------------------------------------------------- */ @@ -2892,27 +2986,30 @@ H5C_create(size_t max_cache_size, for ( i = 0; i < H5C__MAX_EPOCH_MARKERS; i++ ) { - (cache_ptr->epoch_marker_active)[i] = FALSE; - - ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; - ((cache_ptr->epoch_markers)[i]).size = (size_t)0; - ((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class; - ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE; - ((cache_ptr->epoch_markers)[i]).dirtied = FALSE; - ((cache_ptr->epoch_markers)[i]).is_protected = FALSE; - ((cache_ptr->epoch_markers)[i]).is_pinned = FALSE; - ((cache_ptr->epoch_markers)[i]).in_slist = FALSE; - ((cache_ptr->epoch_markers)[i]).ht_next = NULL; - ((cache_ptr->epoch_markers)[i]).ht_prev = NULL; - ((cache_ptr->epoch_markers)[i]).next = NULL; - ((cache_ptr->epoch_markers)[i]).prev = NULL; - ((cache_ptr->epoch_markers)[i]).aux_next = NULL; - ((cache_ptr->epoch_markers)[i]).aux_prev = NULL; + (cache_ptr->epoch_marker_active)[i] = FALSE; + + ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; + ((cache_ptr->epoch_markers)[i]).size = (size_t)0; + ((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class; + ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE; + ((cache_ptr->epoch_markers)[i]).dirtied = FALSE; + ((cache_ptr->epoch_markers)[i]).is_protected = FALSE; + ((cache_ptr->epoch_markers)[i]).is_read_only = FALSE; + ((cache_ptr->epoch_markers)[i]).ro_ref_count = 0; + ((cache_ptr->epoch_markers)[i]).max_ro_ref_count = 0; + ((cache_ptr->epoch_markers)[i]).is_pinned = FALSE; + ((cache_ptr->epoch_markers)[i]).in_slist = FALSE; + ((cache_ptr->epoch_markers)[i]).ht_next = NULL; + ((cache_ptr->epoch_markers)[i]).ht_prev = NULL; + ((cache_ptr->epoch_markers)[i]).next = NULL; + ((cache_ptr->epoch_markers)[i]).prev = NULL; + ((cache_ptr->epoch_markers)[i]).aux_next = NULL; + ((cache_ptr->epoch_markers)[i]).aux_prev = NULL; #if H5C_COLLECT_CACHE_ENTRY_STATS - ((cache_ptr->epoch_markers)[i]).accesses = 0; - ((cache_ptr->epoch_markers)[i]).clears = 0; - ((cache_ptr->epoch_markers)[i]).flushes = 0; - ((cache_ptr->epoch_markers)[i]).pins = 0; + ((cache_ptr->epoch_markers)[i]).accesses = 0; + ((cache_ptr->epoch_markers)[i]).clears = 0; + ((cache_ptr->epoch_markers)[i]).flushes = 0; + ((cache_ptr->epoch_markers)[i]).pins = 0; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ } @@ -3707,6 +3804,12 @@ done: * leave it in (albeit commented out for now). If we can't * find a case where it helps, lets get rid of it. * + * + * Added some sanity checks to the change which verify the + * expected values of the new is_read_only and ro_ref_count + * fields. 
+ *						JRM - 3/29/07
+ *
 *-------------------------------------------------------------------------
 */
 herr_t
@@ -3824,6 +3927,8 @@ H5C_flush_to_min_clean(H5F_t * f,
            ( entry_ptr != NULL ) )
     {
         HDassert( ! (entry_ptr->is_protected) );
+        HDassert( ! (entry_ptr->is_read_only) );
+        HDassert( entry_ptr->ro_ref_count == 0 );
         HDassert( entry_ptr->is_dirty );
         HDassert( entry_ptr->in_slist );

@@ -4270,6 +4375,10 @@ done:
 *		Added initialization for the new flush_in_progress and
 *		destroy_in_progress fields.
 *
+ *		JRM -- 3/29/07
+ *		Added initialization for the new is_read_only and
+ *		ro_ref_count fields.
+ *
 *-------------------------------------------------------------------------
 */

@@ -4458,6 +4567,9 @@ H5C_insert_entry(H5F_t * f,
      */
     entry_ptr->is_protected = FALSE;
+    entry_ptr->is_read_only = FALSE;
+    entry_ptr->ro_ref_count = 0;
+    entry_ptr->max_ro_ref_count = 0; /* JRM - delete this when possible */

     entry_ptr->is_pinned = insert_pinned;

@@ -4932,7 +5044,13 @@ done:
 *
 * Modifications:
 *
- *		None
+ *		JRM -- 3/29/07
+ *		Added sanity check to verify that the pinned entry
+ *		is not protected read only.
+ *
+ *		This sanity check is commented out for now -- uncomment
+ *		it once we deal with the problem of entries being protected
+ *		read only, and then dirtied.
 *
 *-------------------------------------------------------------------------
 */

@@ -4952,7 +5070,9 @@ H5C_mark_pinned_or_protected_entry_dirty(H5C_t * cache_ptr,
     entry_ptr = (H5C_cache_entry_t *)thing;

     if ( entry_ptr->is_protected ) {
-
+#if 0 /* JRM - uncomment this when possible */
+        HDassert( ! ((entry_ptr)->is_read_only) );
+#endif
         /* set the dirtied flag */
         entry_ptr->dirtied = TRUE;

@@ -5300,6 +5420,11 @@ done:
 *		Added conditional compile to avoid unused parameter
 *		warning in production compile.
 *
+ *		JRM -- 4/4/07
+ *		Fixed typo -- changed macro call to
+ *		H5C__UPDATE_STATS_FOR_UNPIN to call to
+ *		H5C__UPDATE_STATS_FOR_PIN.
+ *
 *-------------------------------------------------------------------------
 */
#ifndef NDEBUG
@@ -5337,7 +5462,7 @@ H5C_pin_protected_entry(H5C_t UNUSED * cache_ptr,

         entry_ptr->is_pinned = TRUE;

-        H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+        H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)

done:

@@ -5414,6 +5539,14 @@ done:
 *		disk.  This is necessary as a bug fix in the object
 *		header code requires us to modify a header as it is read.
 *
+ *		JRM -- 3/28/07
+ *		Added the flags parameter and supporting code.  At least
+ *		for now, this parameter is used to allow the entry to
+ *		be protected read only, thus allowing multiple protects.
+ *
+ *		Also added code to allow multiple read only protects
+ *		of cache entries.
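+ *
+ *		A minimal caller side sketch (argument names illustrative
+ *		only; error checking omitted):
+ *
+ *		    thing = H5C_protect(f, dxpl_id, H5AC_noblock_dxpl_id,
+ *		                        cache_ptr, type, addr,
+ *		                        udata1, udata2,
+ *		                        H5C__READ_ONLY_FLAG);
+ *
+ *		    ... read from *thing, but do not modify it ...
+ *
+ *		Any number of such read only protects may be outstanding
+ *		on a single entry at once.  Each one increments
+ *		ro_ref_count, and the entry is only truly unprotected
+ *		when the matching unprotects drop that count to zero.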
+ * *------------------------------------------------------------------------- */ @@ -5425,11 +5558,13 @@ H5C_protect(H5F_t * f, const H5C_class_t * type, haddr_t addr, const void * udata1, - void * udata2) + void * udata2, + unsigned flags) { hbool_t hit; hbool_t first_flush; hbool_t have_write_permitted = FALSE; + hbool_t read_only = FALSE; hbool_t write_permitted; herr_t result; void * thing; @@ -5448,14 +5583,19 @@ H5C_protect(H5F_t * f, HDassert( H5F_addr_defined(addr) ); #if H5C_DO_EXTREME_SANITY_CHECKS - if ( H5C_validate_lru_list(cache_ptr) < 0 ) { + if ( H5C_validate_lru_list(cache_ptr) < 0 ) { - HDassert(0); - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \ - "LRU sanity check failed.\n"); - } + HDassert(0); + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \ + "LRU sanity check failed.\n"); + } #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + if ( (flags & H5C__READ_ONLY_FLAG) != 0 ) + { + read_only = TRUE; + } + /* first check to see if the target is in cache */ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL) @@ -5584,22 +5724,43 @@ H5C_protect(H5F_t * f, if ( entry_ptr->is_protected ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ - "Target already protected?!?.") - } + if ( ( read_only ) && ( entry_ptr->is_read_only ) ) { + + HDassert( entry_ptr->ro_ref_count > 0 ); - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) + (entry_ptr->ro_ref_count)++; - entry_ptr->is_protected = TRUE; + /* JRM - delete this when possible */ + if ( entry_ptr->ro_ref_count > entry_ptr->max_ro_ref_count ) { - entry_ptr->dirtied = FALSE; + entry_ptr->max_ro_ref_count = entry_ptr->ro_ref_count; + } + } else { - ret_value = thing; + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ + "Target already protected & not read only?!?.") + } + } else { + + H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) + + entry_ptr->is_protected = TRUE; + + if ( read_only ) { + + entry_ptr->is_read_only = TRUE; + entry_ptr->ro_ref_count = 1; + entry_ptr->max_ro_ref_count = 1; + } + + entry_ptr->dirtied = FALSE; + } H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) + ret_value = thing; if ( ( cache_ptr->size_decreased ) || ( ( cache_ptr->resize_enabled ) && @@ -6149,6 +6310,10 @@ done: * JRM -- 8/23/06 * Added code supporting new flush related statistics. * + * JRM -- 3/31/07 + * Added code supporting the new write_protects, + * read_protects, and max_read_protects fields. 
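+ *
+ *		Note that, per the field descriptions in H5Cpkg.h, the
+ *		new counters should satisfy
+ *
+ *			(hits + misses) == (write_protects + read_protects)
+ *
+ *		for each type id -- a cheap cross check on the totals
+ *		reported below.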
+ * *------------------------------------------------------------------------- */ @@ -6167,6 +6332,9 @@ H5C_stats(H5C_t * cache_ptr, int i; int64_t total_hits = 0; int64_t total_misses = 0; + int64_t total_write_protects = 0; + int64_t total_read_protects = 0; + int64_t max_read_protects = 0; int64_t total_insertions = 0; int64_t total_pinned_insertions = 0; int64_t total_clears = 0; @@ -6213,6 +6381,11 @@ H5C_stats(H5C_t * cache_ptr, total_hits += cache_ptr->hits[i]; total_misses += cache_ptr->misses[i]; + total_write_protects += cache_ptr->write_protects[i]; + total_read_protects += cache_ptr->read_protects[i]; + if ( max_read_protects < cache_ptr->max_read_protects[i] ) { + max_read_protects = cache_ptr->max_read_protects[i]; + } total_insertions += cache_ptr->insertions[i]; total_pinned_insertions += cache_ptr->pinned_insertions[i]; total_clears += cache_ptr->clears[i]; @@ -6356,6 +6529,13 @@ H5C_stats(H5C_t * cache_ptr, hit_rate); HDfprintf(stdout, + "%s Total write / read (max) protects = %ld / %ld (%d)\n", + cache_ptr->prefix, + (long)total_write_protects, + (long)total_read_protects, + max_read_protects); + + HDfprintf(stdout, "%s Total clears / flushes / evictions = %ld / %ld / %ld\n", cache_ptr->prefix, (long)total_clears, @@ -6443,6 +6623,13 @@ H5C_stats(H5C_t * cache_ptr, hit_rate); HDfprintf(stdout, + "%s write / read (max) protects = %ld / %ld (%d)\n", + cache_ptr->prefix, + (long)(cache_ptr->write_protects[i]), + (long)(cache_ptr->read_protects[i]), + (int)(cache_ptr->max_read_protects[i])); + + HDfprintf(stdout, "%s clears / flushes / evictions = %ld / %ld / %ld\n", cache_ptr->prefix, (long)(cache_ptr->clears[i]), @@ -6554,6 +6741,10 @@ done: * Added conditional compile code to avoid unused parameter * warning in the production build. * + * JRM 3/31/07 + * Added initialization for the new write_protects, + * read_protects, and max_read_protects fields. + * *------------------------------------------------------------------------- */ @@ -6580,6 +6771,9 @@ H5C_stats__reset(H5C_t UNUSED * cache_ptr) { cache_ptr->hits[i] = 0; cache_ptr->misses[i] = 0; + cache_ptr->write_protects[i] = 0; + cache_ptr->read_protects[i] = 0; + cache_ptr->max_read_protects[i] = 0; cache_ptr->insertions[i] = 0; cache_ptr->pinned_insertions[i] = 0; cache_ptr->clears[i] = 0; @@ -6778,6 +6972,13 @@ done: * H5C_cache_entry_t. If this field is TRUE, it is the * equivalent of setting the H5C__DIRTIED_FLAG. * + * JRM -- 3/29/07 + * Modified function to allow a entry to be protected + * more than once if the entry is protected read only. + * + * Also added sanity checks using the new is_read_only and + * ro_ref_count parameters. + * *------------------------------------------------------------------------- */ herr_t @@ -6850,203 +7051,266 @@ H5C_unprotect(H5F_t * f, } #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ -#ifdef H5_HAVE_PARALLEL - /* When the H5C code is used to implement the metadata cache in the - * PHDF5 case, only the cache on process 0 is allowed to write to file. - * All the other metadata caches must hold dirty entries until they - * are told that the entries are clean. - * - * The clear_on_unprotect flag in the H5C_cache_entry_t structure - * exists to deal with the case in which an entry is protected when - * its cache receives word that the entry is now clean. In this case, - * the clear_on_unprotect flag is set, and the entry is flushed with - * the H5C__FLUSH_CLEAR_ONLY_FLAG. 
- * - * All this is a bit awkward, but until the metadata cache entries - * are contiguous, with only one dirty flag, we have to let the supplied - * functions deal with the reseting the is_dirty flag. - */ - if ( entry_ptr->clear_on_unprotect ) { - HDassert( entry_ptr->is_dirty ); + /* if the entry has multiple read only protects, just decrement + * the ro_ref_counter. Don't actually unprotect until the ref count + * drops to zero. + */ + if ( entry_ptr->ro_ref_count > 1 ) { - entry_ptr->clear_on_unprotect = FALSE; + HDassert( entry_ptr->is_protected ); + HDassert( entry_ptr->is_read_only ); - if ( ! dirtied ) { + if ( dirtied ) { - clear_entry = TRUE; - } - } -#endif /* H5_HAVE_PARALLEL */ + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "Read only entry modified(1)??") + } - if ( ! (entry_ptr->is_protected) ) { + (entry_ptr->ro_ref_count)--; - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "Entry already unprotected??") - } + /* Pin or unpin the entry as requested. */ + if ( pin_entry ) { - /* mark the entry as dirty if appropriate */ - entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied ); + if ( entry_ptr->is_pinned ) { - /* update for change in entry size if necessary */ - if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) { + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \ + "Entry already pinned???") + } + entry_ptr->is_pinned = TRUE; + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) - /* update the protected list */ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \ - (cache_ptr->pl_size), \ - (entry_ptr->size), (new_size)); + } else if ( unpin_entry ) { - /* update the hash table */ - H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\ - (new_size)); + if ( ! ( entry_ptr->is_pinned ) ) { - /* if the entry is in the skip list, update that too */ - if ( entry_ptr->in_slist ) { + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \ + "Entry already unpinned???") + } + entry_ptr->is_pinned = FALSE; + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) - H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\ - (new_size)); } - /* update statistics just before changing the entry size */ - H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \ - (new_size)); + } else { - /* finally, update the entry size proper */ - entry_ptr->size = new_size; - } + if ( entry_ptr->is_read_only ) { - /* Pin or unpin the entry as requested. */ - if ( pin_entry ) { + HDassert( entry_ptr->ro_ref_count == 1 ); - if ( entry_ptr->is_pinned ) { + if ( ( dirtied ) && + /* JRM - delete the following line when possible */ + ( entry_ptr->max_ro_ref_count > 1 ) ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \ - "Entry already pinned???") - } - entry_ptr->is_pinned = TRUE; - H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "Read only entry modified(2)??") + } + + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + entry_ptr->max_ro_ref_count = 0; + } + +#ifdef H5_HAVE_PARALLEL + /* When the H5C code is used to implement the metadata cache in the + * PHDF5 case, only the cache on process 0 is allowed to write to file. + * All the other metadata caches must hold dirty entries until they + * are told that the entries are clean. + * + * The clear_on_unprotect flag in the H5C_cache_entry_t structure + * exists to deal with the case in which an entry is protected when + * its cache receives word that the entry is now clean. 
In this case, + * the clear_on_unprotect flag is set, and the entry is flushed with + * the H5C__FLUSH_CLEAR_ONLY_FLAG. + * + * All this is a bit awkward, but until the metadata cache entries + * are contiguous, with only one dirty flag, we have to let the supplied + * functions deal with the reseting the is_dirty flag. + */ + if ( entry_ptr->clear_on_unprotect ) { + + HDassert( entry_ptr->is_dirty ); - } else if ( unpin_entry ) { + entry_ptr->clear_on_unprotect = FALSE; - if ( ! ( entry_ptr->is_pinned ) ) { + if ( ! dirtied ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \ - "Entry already unpinned???") + clear_entry = TRUE; + } } - entry_ptr->is_pinned = FALSE; - H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) +#endif /* H5_HAVE_PARALLEL */ - } + if ( ! (entry_ptr->is_protected) ) { - /* H5C__UPDATE_RP_FOR_UNPROTECT will places the unprotected entry on - * the pinned entry list if entry_ptr->is_pined is TRUE. - */ - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "Entry already unprotected??") + } - entry_ptr->is_protected = FALSE; + /* mark the entry as dirty if appropriate */ + entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied ); - /* if the entry is dirty, 'or' its flush_marker with the set flush flag, - * and then add it to the skip list if it isn't there already. - */ + /* update for change in entry size if necessary */ + if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) { - if ( entry_ptr->is_dirty ) { + /* update the protected list */ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \ + (cache_ptr->pl_size), \ + (entry_ptr->size), (new_size)); - entry_ptr->flush_marker |= set_flush_marker; + /* update the hash table */ + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\ + (new_size)); - if ( ! (entry_ptr->in_slist) ) { + /* if the entry is in the skip list, update that too */ + if ( entry_ptr->in_slist ) { - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), \ + (entry_ptr->size),\ + (new_size)); + } + + /* update statistics just before changing the entry size */ + H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \ + (new_size)); + + /* finally, update the entry size proper */ + entry_ptr->size = new_size; } - } - /* this implementation of the "deleted" option is a bit inefficient, as - * we re-insert the entry to be deleted into the replacement policy - * data structures, only to remove them again. Depending on how often - * we do this, we may want to optimize a bit. - * - * On the other hand, this implementation is reasonably clean, and - * makes good use of existing code. - * JRM - 5/19/04 - */ - if ( deleted ) { + /* Pin or unpin the entry as requested. */ + if ( pin_entry ) { - /* the following first flush flag will never be used as we are - * calling H5C_flush_single_entry with both the - * H5C__FLUSH_CLEAR_ONLY_FLAG and H5C__FLUSH_INVALIDATE_FLAG flags. - * However, it is needed for the function call. - */ - hbool_t dummy_first_flush = TRUE; + if ( entry_ptr->is_pinned ) { - /* we can't delete a pinned entry */ - HDassert ( ! (entry_ptr->is_pinned ) ); + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \ + "Entry already pinned???") + } + entry_ptr->is_pinned = TRUE; + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) - /* verify that the target entry is in the cache. */ + } else if ( unpin_entry ) { - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + if ( ! 
( entry_ptr->is_pinned ) ) { - if ( test_entry_ptr == NULL ) { + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \ + "Entry already unpinned???") + } + entry_ptr->is_pinned = FALSE; + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "entry not in hash table?!?.") } - else if ( test_entry_ptr != entry_ptr ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "hash table contains multiple entries for addr?!?.") - } + /* H5C__UPDATE_RP_FOR_UNPROTECT will places the unprotected entry on + * the pinned entry list if entry_ptr->is_pined is TRUE. + */ + H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) - if ( H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - type, - addr, - (H5C__FLUSH_CLEAR_ONLY_FLAG | - H5C__FLUSH_INVALIDATE_FLAG), - &dummy_first_flush, - TRUE) < 0 ) { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.") - } - } -#ifdef H5_HAVE_PARALLEL - else if ( clear_entry ) { + entry_ptr->is_protected = FALSE; - /* the following first flush flag will never be used as we are - * calling H5C_flush_single_entry with the H5C__FLUSH_CLEAR_ONLY_FLAG - * flag. However, it is needed for the function call. + /* if the entry is dirty, 'or' its flush_marker with the set flush flag, + * and then add it to the skip list if it isn't there already. */ - hbool_t dummy_first_flush = TRUE; - /* verify that the target entry is in the cache. */ + if ( entry_ptr->is_dirty ) { - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + entry_ptr->flush_marker |= set_flush_marker; - if ( test_entry_ptr == NULL ) { + if ( ! (entry_ptr->in_slist) ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "entry not in hash table?!?.") + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + } } - else if ( test_entry_ptr != entry_ptr ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "hash table contains multiple entries for addr?!?.") + /* this implementation of the "deleted" option is a bit inefficient, as + * we re-insert the entry to be deleted into the replacement policy + * data structures, only to remove them again. Depending on how often + * we do this, we may want to optimize a bit. + * + * On the other hand, this implementation is reasonably clean, and + * makes good use of existing code. + * JRM - 5/19/04 + */ + if ( deleted ) { + + /* the following first flush flag will never be used as we are + * calling H5C_flush_single_entry with both the + * H5C__FLUSH_CLEAR_ONLY_FLAG and H5C__FLUSH_INVALIDATE_FLAG flags. + * However, it is needed for the function call. + */ + hbool_t dummy_first_flush = TRUE; + + /* we can't delete a pinned entry */ + HDassert ( ! (entry_ptr->is_pinned ) ); + + /* verify that the target entry is in the cache. 
*/ + + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if ( test_entry_ptr == NULL ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "entry not in hash table?!?.") + } + else if ( test_entry_ptr != entry_ptr ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "hash table contains multiple entries for addr?!?.") + } + + if ( H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + type, + addr, + (H5C__FLUSH_CLEAR_ONLY_FLAG | + H5C__FLUSH_INVALIDATE_FLAG), + &dummy_first_flush, + TRUE) < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.") + } } +#ifdef H5_HAVE_PARALLEL + else if ( clear_entry ) { - if ( H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - type, - addr, - H5C__FLUSH_CLEAR_ONLY_FLAG, - &dummy_first_flush, - TRUE) < 0 ) { + /* the following first flush flag will never be used as we are + * calling H5C_flush_single_entry with the + * H5C__FLUSH_CLEAR_ONLY_FLAG flag. However, it is needed for + * the function call. + */ + hbool_t dummy_first_flush = TRUE; - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.") + /* verify that the target entry is in the cache. */ + + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if ( test_entry_ptr == NULL ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "entry not in hash table?!?.") + } + else if ( test_entry_ptr != entry_ptr ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "hash table contains multiple entries for addr?!?.") + } + + if ( H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + type, + addr, + H5C__FLUSH_CLEAR_ONLY_FLAG, + &dummy_first_flush, + TRUE) < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.") + } } - } #endif /* H5_HAVE_PARALLEL */ + } H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) @@ -8872,6 +9136,11 @@ done: * in which the target entry is resized during flush, and * update the caches data structures accordingly. * + * + * JRM -- 3/29/07 + * Added sanity checks on the new is_read_only and + * ro_ref_count fields. + * *------------------------------------------------------------------------- */ static herr_t @@ -9248,6 +9517,8 @@ H5C_flush_single_entry(H5F_t * f, HDassert( !(entry_ptr->flush_marker) ); HDassert( !(entry_ptr->in_slist) ); HDassert( !(entry_ptr->is_protected) ); + HDassert( !(entry_ptr->is_read_only) ); + HDassert( (entry_ptr->ro_ref_count) == 0 ); if ( (flush_flags & H5C_CALLBACK__SIZE_CHANGED_FLAG) != 0 ) { @@ -9368,6 +9639,10 @@ done: * Added initialization for the new flush_in_progress and * destroy in progress fields. * + * JRM - 3/29/07 + * Added initialization for the new is_read_only and + * ro_ref_count fields. + * *------------------------------------------------------------------------- */ @@ -9432,6 +9707,9 @@ H5C_load_entry(H5F_t * f, entry_ptr->addr = addr; entry_ptr->type = type; entry_ptr->is_protected = FALSE; + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + entry_ptr->max_ro_ref_count = 0; /* JRM - delete this when possible */ entry_ptr->in_slist = FALSE; entry_ptr->flush_marker = FALSE; #ifdef H5_HAVE_PARALLEL @@ -9520,6 +9798,10 @@ done: * is not full. This case occurs when we need to flush to * min clean size before the cache has filled. * + * JRM -- 3/29/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields. 
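+ *
+ *		In outline (a sketch of the invariant, not new logic):
+ *		both the LRU scan that flushes dirty entries and the scan
+ *		that evicts clean ones can only encounter unprotected
+ *		entries, since protected entries -- read only or otherwise
+ *		-- are held on the protected list.  Hence the new checks:
+ *
+ *			HDassert( ! (entry_ptr->is_read_only) );
+ *			HDassert( (entry_ptr->ro_ref_count) == 0 );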
+ *
 *-------------------------------------------------------------------------
 */

@@ -9565,6 +9847,8 @@ H5C_make_space_in_cache(H5F_t * f,
               )
         {
             HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );

             prev_ptr = entry_ptr->prev;

@@ -9631,6 +9915,8 @@ H5C_make_space_in_cache(H5F_t * f,
               )
         {
             HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );
             HDassert( entry_ptr->is_dirty );
             HDassert( entry_ptr->in_slist );

@@ -9675,6 +9961,8 @@ H5C_make_space_in_cache(H5F_t * f,
               )
         {
             HDassert( ! (entry_ptr->is_protected) );
+            HDassert( ! (entry_ptr->is_read_only) );
+            HDassert( (entry_ptr->ro_ref_count) == 0 );
             HDassert( ! (entry_ptr->is_dirty) );

             prev_ptr = entry_ptr->aux_prev;

diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index e19de07..3b8d650 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -568,6 +568,25 @@
 *		equal to the array index has not been in cache when
 *		requested in the current epoch.
 *
+ * write_protects:  Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.  The
+ *		cells are used to record the number of times an entry with
+ *		type id equal to the array index has been write protected
+ *		in the current epoch.
+ *
+ *		Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * read_protects:  Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.  The
+ *		cells are used to record the number of times an entry with
+ *		type id equal to the array index has been read protected
+ *		in the current epoch.
+ *
+ *		Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * max_read_protects:  Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ *		The cells are used to record the maximum number of
+ *		simultaneous read protects on any entry with type id equal
+ *		to the array index in the current epoch.
+ *
 * insertions:  Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.  The cells
 *		are used to record the number of times an entry with type
 *		id equal to the array index has been inserted into the
@@ -844,6 +863,9 @@ struct H5C_t

    /* stats fields */
    int64_t                     hits[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     misses[H5C__MAX_NUM_TYPE_IDS + 1];
+    int64_t                     write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+    int64_t                     read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+    int32_t                     max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     insertions[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     clears[H5C__MAX_NUM_TYPE_IDS + 1];

diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index bd7f446..dc71e16 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -265,6 +265,38 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
 *		Note that protected entries are removed from the LRU lists
 *		and inserted on the protected list.
 *
+ * is_read_only: Boolean flag that is only meaningful if is_protected is
+ *		TRUE.  In this circumstance, it indicates whether the
+ *		entry has been protected read only, or read/write.
+ *
+ *		If the entry has been protected read only (i.e. is_protected
+ *		and is_read_only are both TRUE), we allow the entry to be
+ *		protected more than once.
+ *
+ *		In this case, the number of readers is maintained in the
+ *		ro_ref_count field (see below), and unprotect calls simply
+ *		decrement that field until it drops to zero, at which point
+ *		the entry is actually unprotected.
+ *
+ * ro_ref_count: Integer field used to maintain a count of the number of
+ *		outstanding read only protects on this entry.  This field
+ *		must be zero whenever either is_protected or is_read_only
+ *		are FALSE.
+ *
+ * max_ro_ref_count: Integer field used to track the maximum value of
+ *		ro_ref_count in the current protection of this entry.
+ *		The field must be reset to zero when the entry is
+ *		unprotected.
+ *
+ *		This field exists to allow us to refrain from flagging
+ *		an error if an entry is protected read only, and then
+ *		unprotected dirtied, if the ro_ref_count has not exceeded
+ *		1.
+ *
+ *		It is a temporary fix which should be removed once we
+ *		have corrected all the instances of this behaviour in the
+ *		code that calls the metadata cache.
+ *
 * is_pinned:	Boolean flag indicating whether the entry has been pinned
 *		in the cache.
 *
@@ -432,6 +464,9 @@ typedef struct H5C_cache_entry_t
    hbool_t			is_dirty;
    hbool_t			dirtied;
    hbool_t			is_protected;
+    hbool_t			is_read_only;
+    int				ro_ref_count;
+    int				max_ro_ref_count; /* delete this when possible */
    hbool_t			is_pinned;
    hbool_t			in_slist;
    hbool_t			flush_marker;
@@ -775,6 +810,10 @@ typedef struct H5C_auto_size_ctl_t
 *	H5C__SET_FLUSH_MARKER_FLAG
 *	H5C__PIN_ENTRY_FLAG
 *
+ * These flags apply to H5C_protect()
+ *
+ *	H5C__READ_ONLY_FLAG
+ *
 * These flags apply to H5C_unprotect():
 *
 *	H5C__SET_FLUSH_MARKER_FLAG
@@ -811,6 +850,7 @@ typedef struct H5C_auto_size_ctl_t
#define H5C__FLUSH_CLEAR_ONLY_FLAG		0x0080
#define H5C__FLUSH_MARKED_ENTRIES_FLAG		0x0100
#define H5C__FLUSH_IGNORE_PROTECTED_FLAG	0x0200
+#define H5C__READ_ONLY_FLAG			0x0400

H5_DLL H5C_t * H5C_create(size_t max_cache_size,
@@ -918,7 +958,8 @@
H5_DLL void * H5C_protect(H5F_t * f,
                          const H5C_class_t * type,
                          haddr_t addr,
                          const void * udata1,
-                          void * udata2);
+                          void * udata2,
+                          unsigned flags);

H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr);

diff --git a/test/cache.c b/test/cache.c
index b743e8b..e802f53 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -92,6 +92,7 @@ static void check_flush_cache__flush_op_eviction_test(H5C_t * cache_ptr);
static void check_flush_protected_err(void);
static void check_get_entry_status(void);
static void check_expunge_entry(void);
+static void check_multiple_read_protect(void);
static void check_rename_entry(void);
static void check_rename_entry__run_test(H5C_t * cache_ptr, int test_num,
                                  struct rename_entry_test_spec * spec_ptr);
@@ -109,6 +110,8 @@ static void check_double_unprotect_err(void);
static void check_mark_entry_dirty_errs(void);
static void check_expunge_entry_errs(void);
static void check_resize_entry_errs(void);
+static void check_unprotect_ro_dirty_err(void);
+static void check_protect_ro_rw_err(void);
static void check_auto_cache_resize(void);
static void check_auto_cache_resize_disable(void);
static void check_auto_cache_resize_epoch_markers(void);
@@ -197,6 +200,7 @@ smoke_check_1(void)
                            /* do_renames             */ TRUE,
                            /* rename_to_main_addr    */ FALSE,
                            /* do_destroys            */ TRUE,
+                           /* do_mult_ro_protects    */ TRUE,
                            /* dirty_destroys         */ dirty_destroys,
                            /* dirty_unprotects       */ dirty_unprotects);

@@ -215,6 +219,7 @@ smoke_check_1(void)
                            /* do_renames             */ TRUE,
                            /* rename_to_main_addr    */ TRUE,
                            /* do_destroys            */ FALSE,
+                           /* do_mult_ro_protects    */ TRUE,
                            /* dirty_destroys         */ dirty_destroys,
                            /* dirty_unprotects       */ dirty_unprotects);

@@ -233,6 +238,7 @@ smoke_check_1(void)
                            /* do_renames             */ TRUE,
                            /* rename_to_main_addr    */ FALSE,
                            /* do_destroys            */ FALSE,
+                           /* do_mult_ro_protects    */ TRUE,
                            /* dirty_destroys         */ dirty_destroys,
                            /* dirty_unprotects       */ dirty_unprotects);

@@ -388,6 +394,7 @@ smoke_check_2(void)
                            /* do_renames             */ TRUE,
                            /* rename_to_main_addr    */ FALSE,
                            /* do_destroys 
*/ TRUE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -406,6 +413,7 @@ smoke_check_2(void) /* do_renames */ TRUE, /* rename_to_main_addr */ TRUE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -424,6 +432,7 @@ smoke_check_2(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -578,6 +587,7 @@ smoke_check_3(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ TRUE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -596,6 +606,7 @@ smoke_check_3(void) /* do_renames */ TRUE, /* rename_to_main_addr */ TRUE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -614,6 +625,7 @@ smoke_check_3(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -769,6 +781,7 @@ smoke_check_4(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ TRUE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -787,6 +800,7 @@ smoke_check_4(void) /* do_renames */ TRUE, /* rename_to_main_addr */ TRUE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -805,6 +819,7 @@ smoke_check_4(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ dirty_destroys, /* dirty_unprotects */ dirty_unprotects); @@ -1894,6 +1909,7 @@ write_permitted_check(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ TRUE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ TRUE, /* dirty_unprotects */ TRUE); @@ -1914,6 +1930,7 @@ write_permitted_check(void) /* do_renames */ TRUE, /* rename_to_main_addr */ TRUE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ FALSE, /* dirty_unprotects */ NO_CHANGE); @@ -1934,6 +1951,7 @@ write_permitted_check(void) /* do_renames */ TRUE, /* rename_to_main_addr */ FALSE, /* do_destroys */ FALSE, + /* do_mult_ro_protects */ TRUE, /* dirty_destroys */ TRUE, /* dirty_unprotects */ TRUE); @@ -12680,6 +12698,427 @@ check_expunge_entry(void) /*------------------------------------------------------------------------- + * Function: check_multiple_read_protect() + * + * Purpose: Verify that multiple, simultaneous read protects of a + * single entry perform as expectd. + * + * Return: void + * + * Programmer: John Mainzer + * 4/1/07 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + + +static void +check_multiple_read_protect(void) +{ + const char * fcn_name = "check_multiple_read_protect()"; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("multiple read only protects on a single entry"); + + pass = TRUE; + + /* allocate a cache. Should succeed. + * + * Then to start with, proceed as follows: + * + * Read protect an entry. + * + * Then read protect the entry again. Should succeed. 
+ * + * Read protect yet again. Should succeed. + * + * Unprotect with no changes, and then read protect twice again. + * Should succeed. + * + * Now unprotect three times. Should succeed. + * + * If stats are enabled, verify that correct stats are collected at + * every step. + * + * Also, verify internal state of read protects at every step. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + entry_ptr = &((entries[0])[0]); + + if ( ( entry_ptr->header.is_protected ) || + ( entry_ptr->header.is_read_only ) || + ( entry_ptr->header.ro_ref_count != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 1.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 0 ) || + ( cache_ptr->max_read_protects[0] != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 1.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + protect_entry_ro(cache_ptr, 0, 0); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 1 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 2.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 1 ) || + ( cache_ptr->max_read_protects[0] != 1 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 2.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + protect_entry_ro(cache_ptr, 0, 0); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 3.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 2 ) || + ( cache_ptr->max_read_protects[0] != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 3.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 1 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 4.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 2 ) || + ( cache_ptr->max_read_protects[0] != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 4.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + protect_entry_ro(cache_ptr, 0, 0); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 5.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 3 ) || + ( cache_ptr->max_read_protects[0] != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 5.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + protect_entry_ro(cache_ptr, 0, 0); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! 
( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 6.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 6.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 2 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 7.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 7.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( ( ! ( entry_ptr->header.is_protected ) ) || + ( ! ( entry_ptr->header.is_read_only ) ) || + ( entry_ptr->header.ro_ref_count != 1 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 8.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 8.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( ( entry_ptr->header.is_protected ) || + ( entry_ptr->header.is_read_only ) || + ( entry_ptr->header.ro_ref_count != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 9.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 0 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 9.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + + /* If we get this far, do a write protect and unprotect to verify + * that the stats are getting collected properly here as well. + */ + + if ( pass ) + { + protect_entry(cache_ptr, 0, 0); + + if ( ( ! 
( entry_ptr->header.is_protected ) ) || + ( entry_ptr->header.is_read_only ) || + ( entry_ptr->header.ro_ref_count != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 10.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 1 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 10.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( pass ) + { + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + if ( ( entry_ptr->header.is_protected ) || + ( entry_ptr->header.is_read_only ) || + ( entry_ptr->header.ro_ref_count != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected ro protected status 11.\n"; + } + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 1 ) || + ( cache_ptr->read_protects[0] != 4 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 11.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + + /* Finally, mix things up a little, using a mix of reads and + * and writes on different entries. Also include a pin to verify + * that it works as well. + * + * Stats are looking OK, so we will only test them one more time + * at the end to ensure that all is at it should be. + */ + + if ( pass ) { + + protect_entry(cache_ptr, 0, 2); /* (0,2) write */ + protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (1) */ + protect_entry(cache_ptr, 0, 6); /* (0,6) write */ + + unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */ + H5C__NO_FLAGS_SET); + + protect_entry_ro(cache_ptr, 0, 2); /* (0,2) read only (1) */ + protect_entry(cache_ptr, 0, 1); /* (0,1) write */ + protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (2) */ + protect_entry(cache_ptr, 0, 0); /* (0,0) write */ + protect_entry_ro(cache_ptr, 0, 2); /* (0,2) read only (2) */ + + unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) read only (1) pin */ + H5C__PIN_ENTRY_FLAG); + unprotect_entry(cache_ptr, 0, 6, FALSE, /* (0,6) unprotect */ + H5C__NO_FLAGS_SET); + + protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (3) */ + + unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */ + H5C__NO_FLAGS_SET); + unprotect_entry(cache_ptr, 0, 1, FALSE, /* (0,1) unprotect */ + H5C__NO_FLAGS_SET); + + if ( pass ) { + + entry_ptr = &((entries[0])[4]); + + if ( H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr) < 0 ) { + + pass = FALSE; + failure_mssg = "H5C_pin_protected_entry() failed.\n"; + + } else if ( ! 
(entry_ptr->header.is_pinned) ) { + + pass = FALSE; + failure_mssg = "entry (0,4) not pinned.\n"; + + } else { + + /* keep test bed sanity checks happy */ + entry_ptr->is_pinned = TRUE; + + } + } + + unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) read only (2) */ + H5C__NO_FLAGS_SET); + unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) read only (1) */ + H5C__UNPIN_ENTRY_FLAG); + + if ( ( pass ) && ( entry_ptr->header.is_pinned ) ) { + + pass = FALSE; + failure_mssg = "enty (0,4) still pinned.\n"; + + } + + unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) unprotect */ + H5C__NO_FLAGS_SET); + unprotect_entry(cache_ptr, 0, 0, FALSE, /* (0,0) unprotect */ + H5C__NO_FLAGS_SET); + + unpin_entry(cache_ptr, 0, 2); + } + +#if H5C_COLLECT_CACHE_STATS + if ( ( cache_ptr->write_protects[0] != 5 ) || + ( cache_ptr->read_protects[0] != 9 ) || + ( cache_ptr->max_read_protects[0] != 3 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected protect stats 11.\n"; + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_multiple_read_protect() */ + + +/*------------------------------------------------------------------------- * Function: check_rename_entry() * * Purpose: Verify that H5C_rename_entry behaves as expected. In @@ -14656,6 +15095,11 @@ check_pin_entry_errs(void) * * Modifications: * + * - Modified call to H5C_protect() to pass H5C__NO_FLAGS_SET in the + * the new flags parameter. + * + * JRM -- 3/28/07 + * *------------------------------------------------------------------------- */ @@ -14691,7 +15135,8 @@ check_double_protect_err(void) if ( pass ) { cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]), - entry_ptr->addr, NULL, NULL); + entry_ptr->addr, NULL, NULL, + H5C__NO_FLAGS_SET); if ( cache_entry_ptr != NULL ) { @@ -15171,6 +15616,222 @@ check_resize_entry_errs(void) /*------------------------------------------------------------------------- + * Function: check_unprotect_ro_dirty_err() + * + * Purpose: If an entry is protected read only, verify that unprotecting + * it dirty will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/3/07 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +check_unprotect_ro_dirty_err(void) +{ + const char * fcn_name = "check_unprotect_ro_dirty_err()"; + //herr_t result; + int result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("unprotect a read only entry dirty error"); + + pass = TRUE; + + /* allocate a cache, protect an entry read only, and then unprotect it + * with the dirtied flag set. This should fail. Destroy the cache + * -- should succeed. + */ + + /* at present this test will fail due to code allowing current code + * to function with errors that are not dangerous. Thus this test + * is commented out for now. Put in back into use as soon as possible. 
+ */ +#if 0 /* JRM */ + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry_ro(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, (void *)entry_ptr, + H5C__DIRTIED_FLAG, (size_t)0); + + entry_ptr->is_dirty = TRUE; + + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to unprotect a ro entry dirty succeeded 1.\n"; + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } +#endif + + /* allocate a another cache, protect an entry read only twice, and + * then unprotect it with the dirtied flag set. This should fail. + * Unprotect it with no flags set twice and then destroy the cache. + * This should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry_ro(cache_ptr, 0, 0); + protect_entry_ro(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, (void *)entry_ptr, + H5C__DIRTIED_FLAG, (size_t)0); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to unprotect a ro entry dirty succeeded 2.\n"; + } + } + + if ( pass ) { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_unprotect_ro_dirty_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_protect_ro_rw_err() + * + * Purpose: If an entry is protected read only, verify that protecting + * it rw will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 4/9/07 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +check_protect_ro_rw_err(void) +{ + const char * fcn_name = "check_protect_ro_rw_err()"; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + void * thing_ptr = NULL; + + TESTING("protect a read only entry rw error"); + + pass = TRUE; + + /* allocate a cache, protect an entry read only, and then try to protect + * it again rw. This should fail. + * + * Unprotect the entry and destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry_ro(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + thing_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, NULL, NULL, H5C__NO_FLAGS_SET); + + if ( thing_ptr != NULL ) { + + pass = FALSE; + failure_mssg = "attempt to protect a ro entry rw succeeded.\n"; + } + } + + if ( pass ) { + + unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET); + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! 
pass ) { + + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + } + + return; + +} /* check_protect_ro_rw_err() */ + + +/*------------------------------------------------------------------------- * Function: check_auto_cache_resize() * * Purpose: Exercise the automatic cache resizing functionality. @@ -24181,6 +24842,7 @@ main(void) check_flush_cache(); check_get_entry_status(); check_expunge_entry(); + check_multiple_read_protect(); check_rename_entry(); check_pin_protected_entry(); check_resize_entry(); @@ -24197,6 +24859,8 @@ main(void) check_mark_entry_dirty_errs(); check_expunge_entry_errs(); check_resize_entry_errs(); + check_unprotect_ro_dirty_err(); + check_protect_ro_rw_err(); check_auto_cache_resize(); check_auto_cache_resize_disable(); check_auto_cache_resize_epoch_markers(); diff --git a/test/cache_common.c b/test/cache_common.c index 2d1fc5a..7764fd0 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -1537,6 +1537,10 @@ entry_in_cache(H5C_t * cache_ptr, * Added initialization for new pinned entry test related * fields. * + * JRM -- 4/1/07 + * Added initialization for the new is_read_only, and + * ro_ref_count fields. + * *------------------------------------------------------------------------- */ @@ -1574,6 +1578,9 @@ reset_entries(void) base_addr[j].header.type = NULL; base_addr[j].header.is_dirty = FALSE; base_addr[j].header.is_protected = FALSE; + base_addr[j].header.is_read_only = FALSE; + base_addr[j].header.ro_ref_count = FALSE; + base_addr[j].header.max_ro_ref_count = 0; base_addr[j].header.next = NULL; base_addr[j].header.prev = NULL; base_addr[j].header.aux_next = NULL; @@ -1592,6 +1599,8 @@ reset_entries(void) base_addr[j].writes = 0; base_addr[j].is_dirty = FALSE; base_addr[j].is_protected = FALSE; + base_addr[j].is_read_only = FALSE; + base_addr[j].ro_ref_count = FALSE; base_addr[j].is_pinned = FALSE; base_addr[j].pinning_ref_count = 0; @@ -2683,6 +2692,10 @@ rename_entry(H5C_t * cache_ptr, * 6/11/04 * * Modifications: + * + * - Modified call to H5C_protect to pass H5C__NO_FLAGS_SET in the + * new flags parameter. + * JRM -- 3/28/07 * *------------------------------------------------------------------------- */ @@ -2712,7 +2725,8 @@ protect_entry(H5C_t * cache_ptr, HDassert( !(entry_ptr->is_protected) ); cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[type]), - entry_ptr->addr, NULL, NULL); + entry_ptr->addr, NULL, NULL, + H5C__NO_FLAGS_SET); if ( ( cache_entry_ptr != (void *)entry_ptr ) || ( !(entry_ptr->header.is_protected) ) || @@ -2764,6 +2778,86 @@ protect_entry(H5C_t * cache_ptr, /*------------------------------------------------------------------------- + * Function: protect_entry_ro() + * + * Purpose: Do a read only protect the entry indicated by the type + * and index. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 4/1/07 + * + * Modifications: + * + * - None. 
+ * + *------------------------------------------------------------------------- + */ + +void +protect_entry_ro(H5C_t * cache_ptr, + int32_t type, + int32_t idx) +{ + /* const char * fcn_name = "protect_entry_ro()"; */ + test_entry_t * base_addr; + test_entry_t * entry_ptr; + H5C_cache_entry_t * cache_entry_ptr; + + if ( pass ) { + + HDassert( cache_ptr ); + HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) ); + HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) ); + + base_addr = entries[type]; + entry_ptr = &(base_addr[idx]); + + HDassert( entry_ptr->index == idx ); + HDassert( entry_ptr->type == type ); + HDassert( entry_ptr == entry_ptr->self ); + HDassert( ( ! ( entry_ptr->is_protected ) ) || + ( ( entry_ptr->is_read_only ) && + ( entry_ptr->ro_ref_count > 0 ) ) ); + + cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[type]), + entry_ptr->addr, NULL, NULL, + H5C__READ_ONLY_FLAG); + + if ( ( cache_entry_ptr != (void *)entry_ptr ) || + ( !(entry_ptr->header.is_protected) ) || + ( !(entry_ptr->header.is_read_only) ) || + ( entry_ptr->header.ro_ref_count <= 0 ) || + ( entry_ptr->header.type != &(types[type]) ) || + ( entry_ptr->size != entry_ptr->header.size ) || + ( entry_ptr->addr != entry_ptr->header.addr ) ) { + + pass = FALSE; + failure_mssg = "error in read only H5C_protect()."; + + } else { + + HDassert( ( entry_ptr->cache_ptr == NULL ) || + ( entry_ptr->cache_ptr == cache_ptr ) ); + + entry_ptr->cache_ptr = cache_ptr; + entry_ptr->is_protected = TRUE; + entry_ptr->is_read_only = TRUE; + entry_ptr->ro_ref_count++; + } + + HDassert( ((entry_ptr->header).type)->id == type ); + } + + return; + +} /* protect_entry_ro() */ + + +/*------------------------------------------------------------------------- * Function: unpin_entry() * * Purpose: Unpin the entry indicated by the type and index. @@ -2862,6 +2956,9 @@ unpin_entry(H5C_t * cache_ptr, * JRM -- 3/31/06 * Update for pinned entries. * + * JRM -- 4/1/07 + * Updated for new multiple read protects. + * *------------------------------------------------------------------------- */ @@ -2913,18 +3010,54 @@ unprotect_entry(H5C_t * cache_ptr, flags, (size_t)0); if ( ( result < 0 ) || - ( entry_ptr->header.is_protected ) || + ( ( entry_ptr->header.is_protected ) && + ( ( ! ( entry_ptr->is_read_only ) ) || + ( entry_ptr->ro_ref_count <= 0 ) ) ) || ( entry_ptr->header.type != &(types[type]) ) || ( entry_ptr->size != entry_ptr->header.size ) || ( entry_ptr->addr != entry_ptr->header.addr ) ) { +#if 1 /* JRM */ + if ( result < 0 ) { + HDfprintf(stdout, "result is negative.\n"); + } + if ( ( entry_ptr->header.is_protected ) && + ( ( ! 
( entry_ptr->is_read_only ) ) || + ( entry_ptr->ro_ref_count <= 0 ) ) ) { + HDfprintf(stdout, "protected and not RO or refcnt <= 0.\n"); + } + if ( entry_ptr->header.type != &(types[type]) ) { + HDfprintf(stdout, "type disagreement.\n"); + } + if ( entry_ptr->size != entry_ptr->header.size ) { + HDfprintf(stdout, "size disagreement.\n"); + } + if ( entry_ptr->addr != entry_ptr->header.addr ) { + HDfprintf(stdout, "addr disagreement.\n"); + } +#endif /* JRM */ + pass = FALSE; failure_mssg = "error in H5C_unprotect()."; } else { - entry_ptr->is_protected = FALSE; + if ( entry_ptr->ro_ref_count > 1 ) { + + entry_ptr->ro_ref_count--; + + } else if ( entry_ptr->ro_ref_count == 1 ) { + + entry_ptr->is_protected = FALSE; + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + + } else { + + entry_ptr->is_protected = FALSE; + + } if ( pin_flag_set ) { @@ -2947,6 +3080,10 @@ unprotect_entry(H5C_t * cache_ptr, HDassert( entry_ptr->header.is_dirty ); HDassert( entry_ptr->is_dirty ); } + + HDassert( entry_ptr->header.is_protected == entry_ptr->is_protected ); + HDassert( entry_ptr->header.is_read_only == entry_ptr->is_read_only ); + HDassert( entry_ptr->header.ro_ref_count == entry_ptr->ro_ref_count ); } return; @@ -3092,6 +3229,10 @@ unprotect_entry_with_size_change(H5C_t * cache_ptr, * * Modifications: * + * JRM -- 4/4/07 + * Added code supporting multiple read only protects. + * Note that this increased the minimum lag to 10. + * *------------------------------------------------------------------------- */ @@ -3107,6 +3248,7 @@ row_major_scan_forward(H5C_t * cache_ptr, hbool_t do_renames, hbool_t rename_to_main_addr, hbool_t do_destroys, + hbool_t do_mult_ro_protects, int dirty_destroys, int dirty_unprotects) { @@ -3117,7 +3259,7 @@ row_major_scan_forward(H5C_t * cache_ptr, if ( verbose ) HDfprintf(stdout, "%s(): entering.\n", fcn_name); - HDassert( lag > 5 ); + HDassert( lag >= 10 ); type = 0; @@ -3132,6 +3274,11 @@ row_major_scan_forward(H5C_t * cache_ptr, while ( ( pass ) && ( idx <= (max_indices[type] + lag) ) ) { + if ( verbose ) { + + HDfprintf(stdout, "%d:%d: ", type, idx); + } + if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) && ( (idx + lag) <= max_indices[type] ) && ( ((idx + lag) % 2) == 0 ) && @@ -3197,6 +3344,78 @@ row_major_scan_forward(H5C_t * cache_ptr, H5C__NO_FLAGS_SET); } + if ( do_mult_ro_protects ) + { + if ( ( pass ) && ( (idx + lag - 5) >= 0 ) && + ( (idx + lag - 5) < max_indices[type] ) && + ( (idx + lag - 5) % 9 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx + lag - 5)); + + protect_entry_ro(cache_ptr, type, (idx + lag - 5)); + } + + if ( ( pass ) && ( (idx + lag - 6) >= 0 ) && + ( (idx + lag - 6) < max_indices[type] ) && + ( (idx + lag - 6) % 11 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx + lag - 6)); + + protect_entry_ro(cache_ptr, type, (idx + lag - 6)); + } + + if ( ( pass ) && ( (idx + lag - 7) >= 0 ) && + ( (idx + lag - 7) < max_indices[type] ) && + ( (idx + lag - 7) % 13 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx + lag - 7)); + + protect_entry_ro(cache_ptr, type, (idx + lag - 7)); + } + + if ( ( pass ) && ( (idx + lag - 7) >= 0 ) && + ( (idx + lag - 7) < max_indices[type] ) && + ( (idx + lag - 7) % 9 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx + lag - 7)); + + unprotect_entry(cache_ptr, type, (idx + lag - 7), + FALSE, H5C__NO_FLAGS_SET); + } + + if ( ( pass ) && ( (idx + lag - 8) >= 0 ) && + ( (idx + 
lag - 8) < max_indices[type] ) && + ( (idx + lag - 8) % 11 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx + lag - 8)); + + unprotect_entry(cache_ptr, type, (idx + lag - 8), + FALSE, H5C__NO_FLAGS_SET); + } + + if ( ( pass ) && ( (idx + lag - 9) >= 0 ) && + ( (idx + lag - 9) < max_indices[type] ) && + ( (idx + lag - 9) % 13 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx + lag - 9)); + + unprotect_entry(cache_ptr, type, (idx + lag - 9), + FALSE, H5C__NO_FLAGS_SET); + } + } /* if ( do_mult_ro_protects ) */ + if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) { if ( verbose ) @@ -3205,7 +3424,6 @@ row_major_scan_forward(H5C_t * cache_ptr, protect_entry(cache_ptr, type, idx); } - if ( ( pass ) && ( (idx - lag + 2) >= 0 ) && ( (idx - lag + 2) <= max_indices[type] ) && ( ( (idx - lag + 2) % 7 ) == 0 ) ) { @@ -3433,6 +3651,10 @@ hl_row_major_scan_forward(H5C_t * cache_ptr, * * Modifications: * + * JRM -- 4/4/07 + * Added code supporting multiple read only protects. + * Note that this increased the minimum lag to 10. + * *------------------------------------------------------------------------- */ @@ -3448,6 +3670,7 @@ row_major_scan_backward(H5C_t * cache_ptr, hbool_t do_renames, hbool_t rename_to_main_addr, hbool_t do_destroys, + hbool_t do_mult_ro_protects, int dirty_destroys, int dirty_unprotects) { @@ -3458,7 +3681,7 @@ row_major_scan_backward(H5C_t * cache_ptr, if ( verbose ) HDfprintf(stdout, "%s(): Entering.\n", fcn_name); - HDassert( lag > 5 ); + HDassert( lag >= 10 ); type = NUMBER_OF_ENTRY_TYPES - 1; @@ -3538,6 +3761,78 @@ row_major_scan_backward(H5C_t * cache_ptr, H5C__NO_FLAGS_SET); } + if ( do_mult_ro_protects ) + { + if ( ( pass ) && ( (idx - lag + 5) >= 0 ) && + ( (idx - lag + 5) < max_indices[type] ) && + ( (idx - lag + 5) % 9 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx - lag + 5)); + + protect_entry_ro(cache_ptr, type, (idx - lag + 5)); + } + + if ( ( pass ) && ( (idx - lag + 6) >= 0 ) && + ( (idx - lag + 6) < max_indices[type] ) && + ( (idx - lag + 6) % 11 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx - lag + 6)); + + protect_entry_ro(cache_ptr, type, (idx - lag + 6)); + } + + if ( ( pass ) && ( (idx - lag + 7) >= 0 ) && + ( (idx - lag + 7) < max_indices[type] ) && + ( (idx - lag + 7) % 13 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p-ro, %d, %d) ", type, + (idx - lag + 7)); + + protect_entry_ro(cache_ptr, type, (idx - lag + 7)); + } + + if ( ( pass ) && ( (idx - lag + 7) >= 0 ) && + ( (idx - lag + 7) < max_indices[type] ) && + ( (idx - lag + 7) % 9 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx - lag + 7)); + + unprotect_entry(cache_ptr, type, (idx - lag + 7), + FALSE, H5C__NO_FLAGS_SET); + } + + if ( ( pass ) && ( (idx - lag + 8) >= 0 ) && + ( (idx - lag + 8) < max_indices[type] ) && + ( (idx - lag + 8) % 11 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx - lag + 8)); + + unprotect_entry(cache_ptr, type, (idx - lag + 8), + FALSE, H5C__NO_FLAGS_SET); + } + + if ( ( pass ) && ( (idx - lag + 9) >= 0 ) && + ( (idx - lag + 9) < max_indices[type] ) && + ( (idx - lag + 9) % 13 == 0 ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u-ro, %d, %d) ", type, + (idx - lag + 9)); + + unprotect_entry(cache_ptr, type, (idx - lag + 9), + FALSE, H5C__NO_FLAGS_SET); + } + } /* if ( do_mult_ro_protects ) */ + if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) 
) { if ( verbose ) diff --git a/test/cache_common.h b/test/cache_common.h index e2f8657..ed3b857 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -224,6 +224,12 @@ typedef struct test_entry_t hbool_t is_protected; /* entry should currently be on * the cache's protected list. */ + hbool_t is_read_only; /* TRUE iff the entry should be + * protected read only. + */ + int ro_ref_count; /* Number of outstanding read only + * protects on the entry. + */ hbool_t is_pinned; /* entry is currently pinned in * the cache. */ @@ -622,6 +628,10 @@ void protect_entry(H5C_t * cache_ptr, int32_t type, int32_t idx); +void protect_entry_ro(H5C_t * cache_ptr, + int32_t type, + int32_t idx); + hbool_t entry_in_cache(H5C_t * cache_ptr, int32_t type, int32_t idx); @@ -658,6 +668,7 @@ void row_major_scan_forward(H5C_t * cache_ptr, hbool_t do_renames, hbool_t rename_to_main_addr, hbool_t do_destroys, + hbool_t do_mult_ro_protects, int dirty_destroys, int dirty_unprotects); @@ -681,6 +692,7 @@ void row_major_scan_backward(H5C_t * cache_ptr, hbool_t do_renames, hbool_t rename_to_main_addr, hbool_t do_destroys, + hbool_t do_mult_ro_protects, int dirty_destroys, int dirty_unprotects); -- cgit v0.12