summaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
authorJohn Mainzer <mainzer@hdfgroup.org>2007-04-09 18:58:42 (GMT)
committerJohn Mainzer <mainzer@hdfgroup.org>2007-04-09 18:58:42 (GMT)
commit030543bf0df05153d4189bc8556a6f8506cb0cff (patch)
tree07146066b5536e88e44b1bc35c22c953735b5c2c /test
parenta3d8f174dc6405b8c439485d04a4343564a20e4a (diff)
downloadhdf5-030543bf0df05153d4189bc8556a6f8506cb0cff.zip
hdf5-030543bf0df05153d4189bc8556a6f8506cb0cff.tar.gz
hdf5-030543bf0df05153d4189bc8556a6f8506cb0cff.tar.bz2
[svn-r13618] Modified metadata cache to support multiple read only protects of
cache entries. Added test code to test the new feature. Also some minor cleanup. h5committested -- passed on copper and sol. Failed on osage with a configuration error that appears unrelated to my changes. Serial test (debug mode) passes on Phoenix (Linux x86 2.6 kernel), so I went ahead with the checkin.
Diffstat (limited to 'test')
-rw-r--r--test/cache.c666
-rw-r--r--test/cache_common.c307
-rw-r--r--test/cache_common.h12
3 files changed, 978 insertions, 7 deletions
diff --git a/test/cache.c b/test/cache.c
index b743e8b..e802f53 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -92,6 +92,7 @@ static void check_flush_cache__flush_op_eviction_test(H5C_t * cache_ptr);
static void check_flush_protected_err(void);
static void check_get_entry_status(void);
static void check_expunge_entry(void);
+static void check_multiple_read_protect(void);
static void check_rename_entry(void);
static void check_rename_entry__run_test(H5C_t * cache_ptr, int test_num,
struct rename_entry_test_spec * spec_ptr);
@@ -109,6 +110,8 @@ static void check_double_unprotect_err(void);
static void check_mark_entry_dirty_errs(void);
static void check_expunge_entry_errs(void);
static void check_resize_entry_errs(void);
+static void check_unprotect_ro_dirty_err(void);
+static void check_protect_ro_rw_err(void);
static void check_auto_cache_resize(void);
static void check_auto_cache_resize_disable(void);
static void check_auto_cache_resize_epoch_markers(void);
@@ -197,6 +200,7 @@ smoke_check_1(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -215,6 +219,7 @@ smoke_check_1(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ TRUE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -233,6 +238,7 @@ smoke_check_1(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -388,6 +394,7 @@ smoke_check_2(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -406,6 +413,7 @@ smoke_check_2(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ TRUE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -424,6 +432,7 @@ smoke_check_2(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -578,6 +587,7 @@ smoke_check_3(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -596,6 +606,7 @@ smoke_check_3(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ TRUE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -614,6 +625,7 @@ smoke_check_3(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -769,6 +781,7 @@ smoke_check_4(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -787,6 +800,7 @@ smoke_check_4(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ TRUE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -805,6 +819,7 @@ smoke_check_4(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ dirty_destroys,
/* dirty_unprotects */ dirty_unprotects);
@@ -1894,6 +1909,7 @@ write_permitted_check(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ TRUE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ TRUE,
/* dirty_unprotects */ TRUE);
@@ -1914,6 +1930,7 @@ write_permitted_check(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ TRUE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ FALSE,
/* dirty_unprotects */ NO_CHANGE);
@@ -1934,6 +1951,7 @@ write_permitted_check(void)
/* do_renames */ TRUE,
/* rename_to_main_addr */ FALSE,
/* do_destroys */ FALSE,
+ /* do_mult_ro_protects */ TRUE,
/* dirty_destroys */ TRUE,
/* dirty_unprotects */ TRUE);
@@ -12680,6 +12698,427 @@ check_expunge_entry(void)
/*-------------------------------------------------------------------------
+ * Function: check_multiple_read_protect()
+ *
+ * Purpose: Verify that multiple, simultaneous read protects of a
+ * single entry perform as expected.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/1/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+
+static void
+check_multiple_read_protect(void)
+{
+ const char * fcn_name = "check_multiple_read_protect()";
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_ptr;
+
+ TESTING("multiple read only protects on a single entry");
+
+ pass = TRUE;
+
+ /* allocate a cache. Should succeed.
+ *
+ * Then to start with, proceed as follows:
+ *
+ * Read protect an entry.
+ *
+ * Then read protect the entry again. Should succeed.
+ *
+ * Read protect yet again. Should succeed.
+ *
+ * Unprotect with no changes, and then read protect twice again.
+ * Should succeed.
+ *
+ * Now unprotect three times. Should succeed.
+ *
+ * If stats are enabled, verify that correct stats are collected at
+ * every step.
+ *
+ * Also, verify internal state of read protects at every step.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ entry_ptr = &((entries[0])[0]);
+
+ if ( ( entry_ptr->header.is_protected ) ||
+ ( entry_ptr->header.is_read_only ) ||
+ ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 1.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 0 ) ||
+ ( cache_ptr->max_read_protects[0] != 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 1.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 2.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 1 ) ||
+ ( cache_ptr->max_read_protects[0] != 1 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 2.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 3.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 2 ) ||
+ ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 3.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 4.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 2 ) ||
+ ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 4.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 5.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 3 ) ||
+ ( cache_ptr->max_read_protects[0] != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 5.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 6.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 6.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 2 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 7.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 7.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( ! ( entry_ptr->header.is_read_only ) ) ||
+ ( entry_ptr->header.ro_ref_count != 1 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 8.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 8.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ if ( ( entry_ptr->header.is_protected ) ||
+ ( entry_ptr->header.is_read_only ) ||
+ ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 9.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 0 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 9.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* If we get this far, do a write protect and unprotect to verify
+ * that the stats are getting collected properly here as well.
+ */
+
+ if ( pass )
+ {
+ protect_entry(cache_ptr, 0, 0);
+
+ if ( ( ! ( entry_ptr->header.is_protected ) ) ||
+ ( entry_ptr->header.is_read_only ) ||
+ ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 10.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 1 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 10.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( pass )
+ {
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ if ( ( entry_ptr->header.is_protected ) ||
+ ( entry_ptr->header.is_read_only ) ||
+ ( entry_ptr->header.ro_ref_count != 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected ro protected status 11.\n";
+ }
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 1 ) ||
+ ( cache_ptr->read_protects[0] != 4 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 11.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* Finally, mix things up a little, using a mix of reads and
+ * and writes on different entries. Also include a pin to verify
+ * that it works as well.
+ *
+ * Stats are looking OK, so we will only test them one more time
+ * at the end to ensure that all is as it should be.
+ */
+
+ if ( pass ) {
+
+ protect_entry(cache_ptr, 0, 2); /* (0,2) write */
+ protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (1) */
+ protect_entry(cache_ptr, 0, 6); /* (0,6) write */
+
+ unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */
+ H5C__NO_FLAGS_SET);
+
+ protect_entry_ro(cache_ptr, 0, 2); /* (0,2) read only (1) */
+ protect_entry(cache_ptr, 0, 1); /* (0,1) write */
+ protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (2) */
+ protect_entry(cache_ptr, 0, 0); /* (0,0) write */
+ protect_entry_ro(cache_ptr, 0, 2); /* (0,2) read only (2) */
+
+ unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) read only (1) pin */
+ H5C__PIN_ENTRY_FLAG);
+ unprotect_entry(cache_ptr, 0, 6, FALSE, /* (0,6) unprotect */
+ H5C__NO_FLAGS_SET);
+
+ protect_entry_ro(cache_ptr, 0, 4); /* (0,4) read only (3) */
+
+ unprotect_entry(cache_ptr, 0, 2, FALSE, /* (0,2) unprotect */
+ H5C__NO_FLAGS_SET);
+ unprotect_entry(cache_ptr, 0, 1, FALSE, /* (0,1) unprotect */
+ H5C__NO_FLAGS_SET);
+
+ if ( pass ) {
+
+ entry_ptr = &((entries[0])[4]);
+
+ if ( H5C_pin_protected_entry(cache_ptr, (void *)entry_ptr) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5C_pin_protected_entry() failed.\n";
+
+ } else if ( ! (entry_ptr->header.is_pinned) ) {
+
+ pass = FALSE;
+ failure_mssg = "entry (0,4) not pinned.\n";
+
+ } else {
+
+ /* keep test bed sanity checks happy */
+ entry_ptr->is_pinned = TRUE;
+
+ }
+ }
+
+ unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) read only (2) */
+ H5C__NO_FLAGS_SET);
+ unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) read only (1) */
+ H5C__UNPIN_ENTRY_FLAG);
+
+ if ( ( pass ) && ( entry_ptr->header.is_pinned ) ) {
+
+ pass = FALSE;
+ failure_mssg = "enty (0,4) still pinned.\n";
+
+ }
+
+ unprotect_entry(cache_ptr, 0, 4, FALSE, /* (0,4) unprotect */
+ H5C__NO_FLAGS_SET);
+ unprotect_entry(cache_ptr, 0, 0, FALSE, /* (0,0) unprotect */
+ H5C__NO_FLAGS_SET);
+
+ unpin_entry(cache_ptr, 0, 2);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( ( cache_ptr->write_protects[0] != 5 ) ||
+ ( cache_ptr->read_protects[0] != 9 ) ||
+ ( cache_ptr->max_read_protects[0] != 3 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected protect stats 11.\n";
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass ) {
+
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+ }
+
+ return;
+
+} /* check_multiple_read_protect() */
+
+
+/*-------------------------------------------------------------------------
* Function: check_rename_entry()
*
* Purpose: Verify that H5C_rename_entry behaves as expected. In
@@ -14656,6 +15095,11 @@ check_pin_entry_errs(void)
*
* Modifications:
*
+ * - Modified call to H5C_protect() to pass H5C__NO_FLAGS_SET in
+ * the new flags parameter.
+ *
+ * JRM -- 3/28/07
+ *
*-------------------------------------------------------------------------
*/
@@ -14691,7 +15135,8 @@ check_double_protect_err(void)
if ( pass ) {
cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]),
- entry_ptr->addr, NULL, NULL);
+ entry_ptr->addr, NULL, NULL,
+ H5C__NO_FLAGS_SET);
if ( cache_entry_ptr != NULL ) {
@@ -15171,6 +15616,222 @@ check_resize_entry_errs(void)
/*-------------------------------------------------------------------------
+ * Function: check_unprotect_ro_dirty_err()
+ *
+ * Purpose: If an entry is protected read only, verify that unprotecting
+ * it dirty will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/3/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_unprotect_ro_dirty_err(void)
+{
+ const char * fcn_name = "check_unprotect_ro_dirty_err()";
+ //herr_t result;
+ int result;
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_ptr;
+
+ TESTING("unprotect a read only entry dirty error");
+
+ pass = TRUE;
+
+ /* allocate a cache, protect an entry read only, and then unprotect it
+ * with the dirtied flag set. This should fail. Destroy the cache
+ * -- should succeed.
+ */
+
+ /* at present this test will fail due to code allowing current code
+ * to function with errors that are not dangerous. Thus this test
+ * is commented out for now. Put it back into use as soon as possible.
+ */
+#if 0 /* JRM */
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ entry_ptr = &((entries[0])[0]);
+ }
+
+ if ( pass ) {
+
+ result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]),
+ entry_ptr->addr, (void *)entry_ptr,
+ H5C__DIRTIED_FLAG, (size_t)0);
+
+ entry_ptr->is_dirty = TRUE;
+
+ if ( result >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "attempt to unprotect a ro entry dirty succeeded 1.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+#endif
+
+ /* allocate another cache, protect an entry read only twice, and
+ * then unprotect it with the dirtied flag set. This should fail.
+ * Unprotect it with no flags set twice and then destroy the cache.
+ * This should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry_ro(cache_ptr, 0, 0);
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ entry_ptr = &((entries[0])[0]);
+ }
+
+ if ( pass ) {
+
+ result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]),
+ entry_ptr->addr, (void *)entry_ptr,
+ H5C__DIRTIED_FLAG, (size_t)0);
+
+ if ( result > 0 ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "attempt to unprotect a ro entry dirty succeeded 2.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass ) {
+
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+ }
+
+ return;
+
+} /* check_unprotect_ro_dirty_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_protect_ro_rw_err()
+ *
+ * Purpose: If an entry is protected read only, verify that protecting
+ * it rw will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/9/07
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_protect_ro_rw_err(void)
+{
+ const char * fcn_name = "check_protect_ro_rw_err()";
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_ptr;
+ void * thing_ptr = NULL;
+
+ TESTING("protect a read only entry rw error");
+
+ pass = TRUE;
+
+ /* allocate a cache, protect an entry read only, and then try to protect
+ * it again rw. This should fail.
+ *
+ * Unprotect the entry and destroy the cache -- should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry_ro(cache_ptr, 0, 0);
+
+ entry_ptr = &((entries[0])[0]);
+ }
+
+ if ( pass ) {
+
+ thing_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]),
+ entry_ptr->addr, NULL, NULL, H5C__NO_FLAGS_SET);
+
+ if ( thing_ptr != NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "attempt to protect a ro entry rw succeeded.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ unprotect_entry(cache_ptr, 0, 0, FALSE, H5C__NO_FLAGS_SET);
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass ) {
+
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+ }
+
+ return;
+
+} /* check_protect_ro_rw_err() */
+
+
+/*-------------------------------------------------------------------------
* Function: check_auto_cache_resize()
*
* Purpose: Exercise the automatic cache resizing functionality.
@@ -24181,6 +24842,7 @@ main(void)
check_flush_cache();
check_get_entry_status();
check_expunge_entry();
+ check_multiple_read_protect();
check_rename_entry();
check_pin_protected_entry();
check_resize_entry();
@@ -24197,6 +24859,8 @@ main(void)
check_mark_entry_dirty_errs();
check_expunge_entry_errs();
check_resize_entry_errs();
+ check_unprotect_ro_dirty_err();
+ check_protect_ro_rw_err();
check_auto_cache_resize();
check_auto_cache_resize_disable();
check_auto_cache_resize_epoch_markers();
diff --git a/test/cache_common.c b/test/cache_common.c
index 2d1fc5a..7764fd0 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -1537,6 +1537,10 @@ entry_in_cache(H5C_t * cache_ptr,
* Added initialization for new pinned entry test related
* fields.
*
+ * JRM -- 4/1/07
+ * Added initialization for the new is_read_only, and
+ * ro_ref_count fields.
+ *
*-------------------------------------------------------------------------
*/
@@ -1574,6 +1578,9 @@ reset_entries(void)
base_addr[j].header.type = NULL;
base_addr[j].header.is_dirty = FALSE;
base_addr[j].header.is_protected = FALSE;
+ base_addr[j].header.is_read_only = FALSE;
+ base_addr[j].header.ro_ref_count = FALSE;
+ base_addr[j].header.max_ro_ref_count = 0;
base_addr[j].header.next = NULL;
base_addr[j].header.prev = NULL;
base_addr[j].header.aux_next = NULL;
@@ -1592,6 +1599,8 @@ reset_entries(void)
base_addr[j].writes = 0;
base_addr[j].is_dirty = FALSE;
base_addr[j].is_protected = FALSE;
+ base_addr[j].is_read_only = FALSE;
+ base_addr[j].ro_ref_count = FALSE;
base_addr[j].is_pinned = FALSE;
base_addr[j].pinning_ref_count = 0;
@@ -2683,6 +2692,10 @@ rename_entry(H5C_t * cache_ptr,
* 6/11/04
*
* Modifications:
+ *
+ * - Modified call to H5C_protect to pass H5C__NO_FLAGS_SET in the
+ * new flags parameter.
+ * JRM -- 3/28/07
*
*-------------------------------------------------------------------------
*/
@@ -2712,7 +2725,8 @@ protect_entry(H5C_t * cache_ptr,
HDassert( !(entry_ptr->is_protected) );
cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[type]),
- entry_ptr->addr, NULL, NULL);
+ entry_ptr->addr, NULL, NULL,
+ H5C__NO_FLAGS_SET);
if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
( !(entry_ptr->header.is_protected) ) ||
@@ -2764,6 +2778,86 @@ protect_entry(H5C_t * cache_ptr,
/*-------------------------------------------------------------------------
+ * Function: protect_entry_ro()
+ *
+ * Purpose: Do a read only protect the entry indicated by the type
+ * and index.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/1/07
+ *
+ * Modifications:
+ *
+ * - None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+protect_entry_ro(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ /* const char * fcn_name = "protect_entry_ro()"; */
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ H5C_cache_entry_t * cache_entry_ptr;
+
+ if ( pass ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( ( ! ( entry_ptr->is_protected ) ) ||
+ ( ( entry_ptr->is_read_only ) &&
+ ( entry_ptr->ro_ref_count > 0 ) ) );
+
+ cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[type]),
+ entry_ptr->addr, NULL, NULL,
+ H5C__READ_ONLY_FLAG);
+
+ if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
+ ( !(entry_ptr->header.is_protected) ) ||
+ ( !(entry_ptr->header.is_read_only) ) ||
+ ( entry_ptr->header.ro_ref_count <= 0 ) ||
+ ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error in read only H5C_protect().";
+
+ } else {
+
+ HDassert( ( entry_ptr->cache_ptr == NULL ) ||
+ ( entry_ptr->cache_ptr == cache_ptr ) );
+
+ entry_ptr->cache_ptr = cache_ptr;
+ entry_ptr->is_protected = TRUE;
+ entry_ptr->is_read_only = TRUE;
+ entry_ptr->ro_ref_count++;
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ return;
+
+} /* protect_entry_ro() */
+
+
+/*-------------------------------------------------------------------------
* Function: unpin_entry()
*
* Purpose: Unpin the entry indicated by the type and index.
@@ -2862,6 +2956,9 @@ unpin_entry(H5C_t * cache_ptr,
* JRM -- 3/31/06
* Update for pinned entries.
*
+ * JRM -- 4/1/07
+ * Updated for new multiple read protects.
+ *
*-------------------------------------------------------------------------
*/
@@ -2913,18 +3010,54 @@ unprotect_entry(H5C_t * cache_ptr,
flags, (size_t)0);
if ( ( result < 0 ) ||
- ( entry_ptr->header.is_protected ) ||
+ ( ( entry_ptr->header.is_protected ) &&
+ ( ( ! ( entry_ptr->is_read_only ) ) ||
+ ( entry_ptr->ro_ref_count <= 0 ) ) ) ||
( entry_ptr->header.type != &(types[type]) ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
+#if 1 /* JRM */
+ if ( result < 0 ) {
+ HDfprintf(stdout, "result is negative.\n");
+ }
+ if ( ( entry_ptr->header.is_protected ) &&
+ ( ( ! ( entry_ptr->is_read_only ) ) ||
+ ( entry_ptr->ro_ref_count <= 0 ) ) ) {
+ HDfprintf(stdout, "protected and not RO or refcnt <= 0.\n");
+ }
+ if ( entry_ptr->header.type != &(types[type]) ) {
+ HDfprintf(stdout, "type disagreement.\n");
+ }
+ if ( entry_ptr->size != entry_ptr->header.size ) {
+ HDfprintf(stdout, "size disagreement.\n");
+ }
+ if ( entry_ptr->addr != entry_ptr->header.addr ) {
+ HDfprintf(stdout, "addr disagreement.\n");
+ }
+#endif /* JRM */
+
pass = FALSE;
failure_mssg = "error in H5C_unprotect().";
}
else
{
- entry_ptr->is_protected = FALSE;
+ if ( entry_ptr->ro_ref_count > 1 ) {
+
+ entry_ptr->ro_ref_count--;
+
+ } else if ( entry_ptr->ro_ref_count == 1 ) {
+
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+
+ } else {
+
+ entry_ptr->is_protected = FALSE;
+
+ }
if ( pin_flag_set ) {
@@ -2947,6 +3080,10 @@ unprotect_entry(H5C_t * cache_ptr,
HDassert( entry_ptr->header.is_dirty );
HDassert( entry_ptr->is_dirty );
}
+
+ HDassert( entry_ptr->header.is_protected == entry_ptr->is_protected );
+ HDassert( entry_ptr->header.is_read_only == entry_ptr->is_read_only );
+ HDassert( entry_ptr->header.ro_ref_count == entry_ptr->ro_ref_count );
}
return;
@@ -3092,6 +3229,10 @@ unprotect_entry_with_size_change(H5C_t * cache_ptr,
*
* Modifications:
*
+ * JRM -- 4/4/07
+ * Added code supporting multiple read only protects.
+ * Note that this increased the minimum lag to 10.
+ *
*-------------------------------------------------------------------------
*/
@@ -3107,6 +3248,7 @@ row_major_scan_forward(H5C_t * cache_ptr,
hbool_t do_renames,
hbool_t rename_to_main_addr,
hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
int dirty_destroys,
int dirty_unprotects)
{
@@ -3117,7 +3259,7 @@ row_major_scan_forward(H5C_t * cache_ptr,
if ( verbose )
HDfprintf(stdout, "%s(): entering.\n", fcn_name);
- HDassert( lag > 5 );
+ HDassert( lag >= 10 );
type = 0;
@@ -3132,6 +3274,11 @@ row_major_scan_forward(H5C_t * cache_ptr,
while ( ( pass ) && ( idx <= (max_indices[type] + lag) ) )
{
+ if ( verbose ) {
+
+ HDfprintf(stdout, "%d:%d: ", type, idx);
+ }
+
if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
( (idx + lag) <= max_indices[type] ) &&
( ((idx + lag) % 2) == 0 ) &&
@@ -3197,6 +3344,78 @@ row_major_scan_forward(H5C_t * cache_ptr,
H5C__NO_FLAGS_SET);
}
+ if ( do_mult_ro_protects )
+ {
+ if ( ( pass ) && ( (idx + lag - 5) >= 0 ) &&
+ ( (idx + lag - 5) < max_indices[type] ) &&
+ ( (idx + lag - 5) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx + lag - 5));
+
+ protect_entry_ro(cache_ptr, type, (idx + lag - 5));
+ }
+
+ if ( ( pass ) && ( (idx + lag - 6) >= 0 ) &&
+ ( (idx + lag - 6) < max_indices[type] ) &&
+ ( (idx + lag - 6) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx + lag - 6));
+
+ protect_entry_ro(cache_ptr, type, (idx + lag - 6));
+ }
+
+ if ( ( pass ) && ( (idx + lag - 7) >= 0 ) &&
+ ( (idx + lag - 7) < max_indices[type] ) &&
+ ( (idx + lag - 7) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx + lag - 7));
+
+ protect_entry_ro(cache_ptr, type, (idx + lag - 7));
+ }
+
+ if ( ( pass ) && ( (idx + lag - 7) >= 0 ) &&
+ ( (idx + lag - 7) < max_indices[type] ) &&
+ ( (idx + lag - 7) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx + lag - 7));
+
+ unprotect_entry(cache_ptr, type, (idx + lag - 7),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+
+ if ( ( pass ) && ( (idx + lag - 8) >= 0 ) &&
+ ( (idx + lag - 8) < max_indices[type] ) &&
+ ( (idx + lag - 8) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx + lag - 8));
+
+ unprotect_entry(cache_ptr, type, (idx + lag - 8),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+
+ if ( ( pass ) && ( (idx + lag - 9) >= 0 ) &&
+ ( (idx + lag - 9) < max_indices[type] ) &&
+ ( (idx + lag - 9) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx + lag - 9));
+
+ unprotect_entry(cache_ptr, type, (idx + lag - 9),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+ } /* if ( do_mult_ro_protects ) */
+
if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
if ( verbose )
@@ -3205,7 +3424,6 @@ row_major_scan_forward(H5C_t * cache_ptr,
protect_entry(cache_ptr, type, idx);
}
-
if ( ( pass ) && ( (idx - lag + 2) >= 0 ) &&
( (idx - lag + 2) <= max_indices[type] ) &&
( ( (idx - lag + 2) % 7 ) == 0 ) ) {
@@ -3433,6 +3651,10 @@ hl_row_major_scan_forward(H5C_t * cache_ptr,
*
* Modifications:
*
+ * JRM -- 4/4/07
+ * Added code supporting multiple read only protects.
+ * Note that this increased the minimum lag to 10.
+ *
*-------------------------------------------------------------------------
*/
@@ -3448,6 +3670,7 @@ row_major_scan_backward(H5C_t * cache_ptr,
hbool_t do_renames,
hbool_t rename_to_main_addr,
hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
int dirty_destroys,
int dirty_unprotects)
{
@@ -3458,7 +3681,7 @@ row_major_scan_backward(H5C_t * cache_ptr,
if ( verbose )
HDfprintf(stdout, "%s(): Entering.\n", fcn_name);
- HDassert( lag > 5 );
+ HDassert( lag >= 10 );
type = NUMBER_OF_ENTRY_TYPES - 1;
@@ -3538,6 +3761,78 @@ row_major_scan_backward(H5C_t * cache_ptr,
H5C__NO_FLAGS_SET);
}
+ if ( do_mult_ro_protects )
+ {
+ if ( ( pass ) && ( (idx - lag + 5) >= 0 ) &&
+ ( (idx - lag + 5) < max_indices[type] ) &&
+ ( (idx - lag + 5) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 5));
+
+ protect_entry_ro(cache_ptr, type, (idx - lag + 5));
+ }
+
+ if ( ( pass ) && ( (idx - lag + 6) >= 0 ) &&
+ ( (idx - lag + 6) < max_indices[type] ) &&
+ ( (idx - lag + 6) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 6));
+
+ protect_entry_ro(cache_ptr, type, (idx - lag + 6));
+ }
+
+ if ( ( pass ) && ( (idx - lag + 7) >= 0 ) &&
+ ( (idx - lag + 7) < max_indices[type] ) &&
+ ( (idx - lag + 7) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p-ro, %d, %d) ", type,
+ (idx - lag + 7));
+
+ protect_entry_ro(cache_ptr, type, (idx - lag + 7));
+ }
+
+ if ( ( pass ) && ( (idx - lag + 7) >= 0 ) &&
+ ( (idx - lag + 7) < max_indices[type] ) &&
+ ( (idx - lag + 7) % 9 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 7));
+
+ unprotect_entry(cache_ptr, type, (idx - lag + 7),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+
+ if ( ( pass ) && ( (idx - lag + 8) >= 0 ) &&
+ ( (idx - lag + 8) < max_indices[type] ) &&
+ ( (idx - lag + 8) % 11 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 8));
+
+ unprotect_entry(cache_ptr, type, (idx - lag + 8),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+
+ if ( ( pass ) && ( (idx - lag + 9) >= 0 ) &&
+ ( (idx - lag + 9) < max_indices[type] ) &&
+ ( (idx - lag + 9) % 13 == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u-ro, %d, %d) ", type,
+ (idx - lag + 9));
+
+ unprotect_entry(cache_ptr, type, (idx - lag + 9),
+ FALSE, H5C__NO_FLAGS_SET);
+ }
+ } /* if ( do_mult_ro_protects ) */
+
if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
if ( verbose )
diff --git a/test/cache_common.h b/test/cache_common.h
index e2f8657..ed3b857 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -224,6 +224,12 @@ typedef struct test_entry_t
hbool_t is_protected; /* entry should currently be on
* the cache's protected list.
*/
+ hbool_t is_read_only; /* TRUE iff the entry should be
+ * protected read only.
+ */
+ int ro_ref_count; /* Number of outstanding read only
+ * protects on the entry.
+ */
hbool_t is_pinned; /* entry is currently pinned in
* the cache.
*/
@@ -622,6 +628,10 @@ void protect_entry(H5C_t * cache_ptr,
int32_t type,
int32_t idx);
+void protect_entry_ro(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
hbool_t entry_in_cache(H5C_t * cache_ptr,
int32_t type,
int32_t idx);
@@ -658,6 +668,7 @@ void row_major_scan_forward(H5C_t * cache_ptr,
hbool_t do_renames,
hbool_t rename_to_main_addr,
hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
int dirty_destroys,
int dirty_unprotects);
@@ -681,6 +692,7 @@ void row_major_scan_backward(H5C_t * cache_ptr,
hbool_t do_renames,
hbool_t rename_to_main_addr,
hbool_t do_destroys,
+ hbool_t do_mult_ro_protects,
int dirty_destroys,
int dirty_unprotects);