summaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
authormainzer <mainzer#hdfgroup.org>2020-04-29 16:34:46 (GMT)
committerDavid Young <dyoung@hdfgroup.org>2020-05-21 14:51:39 (GMT)
commit18dab4e5767fcf9cdad2220d1a89f1ae7b002dd1 (patch)
treeaf43b990d3e5c90591c24974114ac0335df6ae25 /test
parentd5ad503cfe37263c903a2a755c464c42d67530c4 (diff)
downloadhdf5-18dab4e5767fcf9cdad2220d1a89f1ae7b002dd1.zip
hdf5-18dab4e5767fcf9cdad2220d1a89f1ae7b002dd1.tar.gz
hdf5-18dab4e5767fcf9cdad2220d1a89f1ae7b002dd1.tar.bz2
Modified page buffer to split entries only where necessary -- specifically
when handling an I/O request on a metadata entry that has been sub-allocated from a larger file space allocation (i.e. fixed and extensible array), and that crosses at least one page boundary. . This required modifying the metadata cache to provide the type of the metadata cache entry in the current I/O request. For now, this is done with a function call. Once we are sure this works, it may be appropriate to convert this to a macro, or to add a flags parameter to the H5F block read/write calls. Also updated the metadata cache to report whether a read request is speculative -- again via a function call. This allowed me to remove the last address static variable in the H5PB_read() call, which is necessary to support multiple files opened in VFD SWMR mode. Also re-wrote the H5PB_remove_entries() call to handle release of large metadata file space allocations that have been sub-allocated into multiple metadata entries. Also modified the call to H5PB_remove_entries() in H5MF__xfree_impl() to invoke it whenever the page buffer is enabled and the size of the space to be freed is of page size or larger. Tested serial / debug on charis and Jelly. Found a bug in H5MF_xfree_impl(), in which the call to H5PB_remove_entries() is skipped due to HGOTO_DONE calls earlier in the function. While the obvious action is to move the call earlier in the function, best to consult with Vailin first, as there is much going on and it would be best to avoid making the situation worse. If nothing else, there are some error management issues.
Diffstat (limited to 'test')
-rw-r--r--test/page_buffer.c1506
1 files changed, 1456 insertions, 50 deletions
diff --git a/test/page_buffer.c b/test/page_buffer.c
index 6b6de02..5da326e 100644
--- a/test/page_buffer.c
+++ b/test/page_buffer.c
@@ -24,6 +24,15 @@
#include "h5test.h"
+/*
+ * This file needs to access private information from the H5C package.
+ * This file also needs to access the metadata cache testing code.
+ */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5C_TESTING /*suppress warning about H5C testing funcs*/
+#include "H5Cpkg.h" /* Cache */
+
+
#include "H5CXprivate.h" /* API Contexts */
#include "H5Iprivate.h"
#include "H5PBprivate.h"
@@ -65,6 +74,12 @@ static unsigned test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
static unsigned test_lru_processing(hid_t orig_fapl, const char *env_h5_drvr);
static unsigned test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr);
static unsigned test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr);
+static unsigned md_entry_splitting_smoke_check(hid_t orig_fapl,
+ const char *env_h5_drvr, bool);
+static unsigned md_entry_splitting_boundary_test(hid_t orig_fapl,
+ const char *env_h5_drvr, bool);
+static unsigned verify_page_buffering_disabled(hid_t orig_fapl,
+ const char *env_h5_drvr);
#endif /* H5_HAVE_PARALLEL */
#define FILENAME "filepaged"
@@ -333,7 +348,8 @@ error:
HDfree(data);
} H5E_END_TRY;
return(1);
-}
+
+} /* create_file() */
/*-------------------------------------------------------------------------
@@ -488,7 +504,7 @@ set_multi_split(const char *env_h5_drvr, hid_t fapl, hsize_t pagesize)
error:
return 1;
-}
+} /* set_multi_split() */
#ifndef H5_HAVE_PARALLEL
@@ -807,7 +823,8 @@ error:
HDfree(odata);
} H5E_END_TRY;
return 1;
-}
+
+} /* test_mpmde_delay_basic() */
/*
@@ -1009,7 +1026,8 @@ error:
HDfree(odata);
} H5E_END_TRY;
return 1;
-}
+
+} /* test_spmde_lru_evict_basic() */
/*
@@ -1146,7 +1164,8 @@ error:
HDfree(odata);
} H5E_END_TRY;
return 1;
-}
+
+} /* test_spmde_delay_basic() */
/*
@@ -1179,6 +1198,19 @@ error:
* page buffer.
*
* JRM -- 10/26/18
+ *
+ * We have decided not to buffer raw data in the page buffer
+ * when operating in VFD SWMR mode. This is necessary as
+ * otherwise raw data can get stuck in the page buffer, thus
+ * delaying it's visibility to the reader.
+ *
+ * Obviously, there is a potential performance trade off
+ * here, but it shouldn't be significant in the expected
+ * VFD SWMR use cases. Needless to say, we will revisit this
+ * if necessary.
+ *
+ * JRM -- 4/8/20
+ *
*/
/* Changes due to file space page size has a minimum size of 512 */
@@ -1235,7 +1267,8 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
TEST_ERROR;
/* allocate space for 2000 elements */
- if (HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements)))
+ if (HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW,
+ sizeof(int) * (size_t)num_elements)))
FAIL_STACK_ERROR;
if ((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
@@ -1244,7 +1277,8 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
/* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int) * (size_t)num_elements, data) < 0)
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr,
+ sizeof(int) * (size_t)num_elements, data) < 0)
FAIL_STACK_ERROR;
/* update the first 100 elements to have values 0-99 - this will be
@@ -1258,48 +1292,75 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
page_count ++;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
/* update elements 300 - 450, with values 300 - - this will
bring two more pages into the page buffer. */
for(i=0 ; i<150 ; i++)
data[i] = i+300;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 300), sizeof(int) * 150, data) < 0)
+
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 300),
+ sizeof(int) * 150, data) < 0)
FAIL_STACK_ERROR;
+
page_count += 2;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
/* update elements 100 - 300, this will go to disk but also update
existing pages in the page buffer. */
for(i=0 ; i<200 ; i++)
data[i] = i+100;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 100), sizeof(int) * 200, data) < 0)
+
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 100),
+ sizeof(int) * 200, data) < 0)
FAIL_STACK_ERROR;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
/* Update elements 225-300 - this will update an existing page in the PB */
/* Changes: 450 - 600; 150 */
for(i=0 ; i<150 ; i++)
data[i] = i+450;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 450), sizeof(int) * 150, data) < 0)
+
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 450),
+ sizeof(int) * 150, data) < 0)
FAIL_STACK_ERROR;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
/* Do a full page write to block 600-800 - should bypass the PB */
for(i=0 ; i<200 ; i++)
data[i] = i+600;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 600), sizeof(int) * 200, data) < 0)
+
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 600),
+ sizeof(int) * 200, data) < 0)
FAIL_STACK_ERROR;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
- /* read elements 800 - 1200, this should not affect the PB, and should read -1s */
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 800), sizeof(int) * 400, data) < 0)
+ /* read elements 800 - 1200, this should not affect the PB, and should
+ * read -1s
+ */
+ if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 800),
+ sizeof(int) * 400, data) < 0)
FAIL_STACK_ERROR;
+
for (i=0; i < 400; i++) {
if (data[i] != -1) {
HDfprintf(stderr, "Read different values than written\n");
@@ -1307,14 +1368,19 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
FAIL_STACK_ERROR;
}
}
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
FAIL_STACK_ERROR;
/* read elements 1200 - 1201, this should read -1 and bring in an
* entire page of addr 1200
*/
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 1200), sizeof(int) * 1, data) < 0)
+ if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 1200),
+ sizeof(int) * 1, data) < 0)
FAIL_STACK_ERROR;
+
for (i=0; i < 1; i++) {
if (data[i] != -1) {
HDfprintf(stderr, "Read different values than written\n");
@@ -1323,14 +1389,19 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
}
}
page_count ++;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
TEST_ERROR;
/* read elements 175 - 225, this should use the PB existing pages */
/* Changes: 350 - 450 */
/* read elements 175 - 225, this should use the PB existing pages */
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 350), sizeof(int) * 100, data) < 0)
+ if (H5F_block_read(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 350),
+ sizeof(int) * 100, data) < 0)
FAIL_STACK_ERROR;
+
for (i=0; i < 100; i++) {
if (data[i] != i + 350) {
HDfprintf(stderr, "Read different values than written\n");
@@ -1339,16 +1410,27 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
TEST_ERROR;
}
}
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
TEST_ERROR;
- /* read elements 0 - 800 using the VFD.. this should result in -1s
- except for the writes that went through the PB (100-300 & 600-800) */
- if (H5FD_read(f->shared->lf, H5FD_MEM_DRAW, addr, sizeof(int) * 800, data) < 0)
+ /* read elements 0 - 800 using the VFD.
+ *
+ * In the non-VFD SWMR case, this should result in -1s
+ * except for the writes that went through the PB (100-300 & 600-800)
+ *
+ * In the VFD SWMR case, the page buffer is bypassed for raw data,
+ * thus all writes should be visible.
+ */
+ if (H5FD_read(f->shared->lf, H5FD_MEM_DRAW, addr,
+ sizeof(int) * 800, data) < 0)
FAIL_STACK_ERROR;
+
i = 0;
while (i < 800) {
- if((i>=100 && i<300) || i >= 600) {
+ if((vfd_swmr_mode) || (i>=100 && i<300) || i >= 600) {
if (data[i] != i) {
HDfprintf(stderr, "Read different values than written\n");
HDfprintf(stderr, "data[%d] = %d, %d expected.\n",
@@ -1372,8 +1454,12 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
*/
if (H5F_block_read(f, H5FD_MEM_DRAW, addr, sizeof(int) * 800, data) < 0)
FAIL_STACK_ERROR;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
TEST_ERROR;
+
for (i=0; i < 800; i++) {
if (data[i] != i) {
HDfprintf(stderr, "Read different values than written\n");
@@ -1389,10 +1475,16 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
*/
for(i=0 ; i<1000 ; i++)
data[i] = 0;
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 400), sizeof(int) * 1000, data) < 0)
+
+ if (H5F_block_write(f, H5FD_MEM_DRAW, addr + (sizeof(int) * 400),
+ sizeof(int) * 1000, data) < 0)
FAIL_STACK_ERROR;
+
page_count -= 2;
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
TEST_ERROR;
/* read elements 0 - 1000.. this should go to disk then update the
@@ -1400,6 +1492,7 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
*/
if (H5F_block_read(f, H5FD_MEM_DRAW, addr, sizeof(int) * 1000, data) < 0)
FAIL_STACK_ERROR;
+
i=0;
while (i < 1000) {
if(i<400) {
@@ -1420,7 +1513,10 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr,
}
i++;
}
- if (f->shared->pb_ptr->curr_pages != page_count + base_page_cnt)
+
+ if ( ( f->shared->pb_ptr->curr_pages != page_count + base_page_cnt ) &&
+ ( ( vfd_swmr_mode ) &&
+ ( f->shared->pb_ptr->curr_pages != base_page_cnt ) ) )
TEST_ERROR;
if (H5Fclose(file_id) < 0)
@@ -2670,22 +2766,24 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
sizeof(int)*100, data) < 0)
FAIL_STACK_ERROR;
- if ( ( f->shared->pb_ptr->accesses[0] != 9 ) ||
+ /* was 9, 16, 0 -- review this */
+ if ( ( f->shared->pb_ptr->accesses[0] != 10 ) ||
( f->shared->pb_ptr->accesses[1] != 16 ) ||
( f->shared->pb_ptr->accesses[2] != 0 ) ) {
- HDfprintf(stderr, "accesses[] = {%d, %d, %d}. {9, 16, 0} expected\n",
+ HDfprintf(stderr, "accesses[] = {%d, %d, %d}. {10, 16, 0} expected\n",
f->shared->pb_ptr->accesses[0],
f->shared->pb_ptr->accesses[1],
f->shared->pb_ptr->accesses[2]);
TEST_ERROR;
}
- if ( ( f->shared->pb_ptr->bypasses[0] != 2 ) ||
+ /* was 2, 1, 1 -- review this */
+ if ( ( f->shared->pb_ptr->bypasses[0] != 0 ) ||
( f->shared->pb_ptr->bypasses[1] != 1 ) ||
( f->shared->pb_ptr->bypasses[2] != 1 ) ) {
- HDfprintf(stderr, "bypasses[] = {%d, %d, %d}. {2, 1, 1} expected\n",
+ HDfprintf(stderr, "bypasses[] = {%d, %d, %d}. {0, 1, 1} expected\n",
f->shared->pb_ptr->bypasses[0],
f->shared->pb_ptr->bypasses[1],
f->shared->pb_ptr->bypasses[2]);
@@ -2703,18 +2801,20 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
TEST_ERROR;
}
- if ( ( f->shared->pb_ptr->misses[0] != 9 ) ||
+ /* was 9, 16. 0 -- review this */
+ if ( ( f->shared->pb_ptr->misses[0] != 10 ) ||
( f->shared->pb_ptr->misses[1] != 16 ) ||
( f->shared->pb_ptr->misses[2] != 0 ) ) {
- HDfprintf(stderr, "misses[] = {%d, %d, %d}. {9, 16, 0} expected\n",
+ HDfprintf(stderr, "misses[] = {%d, %d, %d}. {10, 16, 0} expected\n",
f->shared->pb_ptr->misses[0],
f->shared->pb_ptr->misses[1],
f->shared->pb_ptr->misses[2]);
TEST_ERROR;
}
- if ( ( f->shared->pb_ptr->evictions[0] != 7) ||
+ /* was 7, 9, 0 -- review this */
+ if ( ( f->shared->pb_ptr->evictions[0] != 9) ||
( f->shared->pb_ptr->evictions[1] != 9) ||
( f->shared->pb_ptr->evictions[2] != 0 ) ) {
@@ -2736,17 +2836,19 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
evictions, bypasses) < 0)
FAIL_STACK_ERROR;
- if ( ( accesses[0] != 9 ) ||
+ /* was 9, 16, 0 -- review this */
+ if ( ( accesses[0] != 10 ) ||
( accesses[1] != 16 ) ||
( accesses[2] != 0 ) ) {
HDfprintf(stderr,
- "accesses[] = {%d, %d, %d}. {9, 16, 0} expected\n",
+ "accesses[] = {%d, %d, %d}. {10, 16, 0} expected\n",
accesses[0], accesses[1], accesses[2]);
TEST_ERROR;
}
- if ( ( bypasses[0] != 2 ) ||
+ /* was 2, 1, 1 -- review this */
+ if ( ( bypasses[0] != 0 ) ||
( bypasses[1] != 1 ) ||
( bypasses[2] != 1 ) ) {
@@ -2764,22 +2866,24 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
TEST_ERROR;
}
- if ( ( misses[0] != 9 ) ||
+ /* was 9, 16. 0 -- review this */
+ if ( ( misses[0] != 10 ) ||
( misses[1] != 16 ) ||
( misses[2] != 0 ) ) {
- HDfprintf(stderr, "misses[] = {%d, %d, %d}. {9, 16, 0} expected\n",
+ HDfprintf(stderr, "misses[] = {%d, %d, %d}. {10, 16, 0} expected\n",
misses[0], misses[1], misses[2]);
TEST_ERROR;
}
- if ( ( evictions[0] != 7 ) ||
+ /* was 9, 9, 0 -- review this */
+ if ( ( evictions[0] != 9 ) ||
( evictions[1] != 9 ) ||
( evictions[2] != 0 ) ) {
HDfprintf(stderr,
- "evictions[] = {%d, %d, %d}. {%d, %d, 0} expected\n",
- evictions[0], evictions[1], evictions[2], 7, 9);
+ "evictions[] = {%d, %d, %d}. {9, 9, 0} expected\n",
+ evictions[0], evictions[1], evictions[2]);
TEST_ERROR;
}
@@ -2955,10 +3059,1307 @@ error:
return 1;
-}
+} /* verify_page_buffering_disabled() */
+
#endif /* H5_HAVE_PARALLEL */
+/*************************************************************************
+ *
+ * Function: md_entry_splitting_smoke_check()
+ *
+ * Purpose: Normally, file space for metadata entries is allocated
+ * indvidually. In the context of paged allocation, this
+ * ensures that all entries that cross page boundaries start
+ * on a page boundary, and that any space between the end of
+ * a multi-page metadata entry and the next page boundary
+ * is un-used.
+ *
+ * In the context of VFD SWMR, this fact along with atomic
+ * metadata entry I/O is used to minimize the size of the
+ * index in the metadata file, and to optimize metadata
+ * metadata reads on the VFD SWMR reader side. It is also
+ * used as a simplifying assumption in normal page buffer
+ * operation.
+ *
+ * Unfortunately, it turns out that some metadata cache
+ * clients (H5FA & H5EA) allocate the needed file space in
+ * a single block, and sub-allocate space for individual
+ * entries out of this block.
+ *
+ * While this is a design flaw from the perspective
+ * VFD SWMR, repairing the issue no feasible at this time,
+ * and in any case, there will always be the issue of
+ * existing files.
+ *
+ * Thus, for now at least, the page buffer has to code around
+ * the issue when operating in VFD SWMR mode.
+ *
+ * It does this by examining metadata I/O requests that
+ * cross page boundaries, and querying the metadata cache
+ * for the ID of the associated cache client.
+ *
+ * If the request is associated with a cache client that
+ * that uses sub-allocation, the I/O request must be broken
+ * into the minimal number of sub-requests such that each
+ * request either doesn't cross page boundaries, or is
+ * page aligned, and of length equal to some multiple of
+ * the page size.
+ *
+ * This test exists to verify that such entries are read
+ * and written correctly.
+ *
+ * Note that it does not concern itself with verifying
+ * the correct handling of the split I/O requests, as
+ * the split is done immediately upon receipt, and each
+ * of the sub-requests is treated as a normal metadata
+ * I/O request.
+ *
+ * Note that this test requires us to modify the page buffer
+ * hint fields in the metadata cache to trick it into
+ * re-laying the desired hints to the page buffer, even
+ * though it is not generating the I/O requests in this
+ * test.
+ *
+ * Return: 0 if test is sucessful
+ * 1 if test fails
+ *
+ * Programmer: John Mainzer
+ * 4/9/20
+ *
+ * Changes: None.
+ *
+ *************************************************************************/
+
+#define HDR_SIZE 40
+#define MD_PAGE_SIZE 250
+#define TOT_SYNTH_ENTRY_SIZES (HDR_SIZE + (3 * MD_PAGE_SIZE))
+
+static unsigned
+md_entry_splitting_smoke_check(hid_t orig_fapl, const char *env_h5_drvr,
+ bool vfd_swmr_mode)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ int i;
+ int * synth_md_vals = NULL;
+ int * synth_md_test_buf = NULL;
+ haddr_t base_addr;
+ haddr_t p0_addr;
+ haddr_t p1_addr;
+ haddr_t p2_addr;
+ H5F_t *f = NULL;
+ const uint32_t max_lag = 5;
+
+ TESTING("%sMetadata Entry Splitting Smoke Check", \
+ vfd_swmr_mode ? "VFD SWMR " : "");
+
+ h5_fixname(namebase, orig_fapl, filename, sizeof(filename));
+
+ if ((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR;
+
+ if (set_multi_split(env_h5_drvr, fapl, sizeof(int) * 200) != 0)
+ TEST_ERROR;
+
+ if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, 1) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_file_space_page_size(fcpl, (size_t)1000) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_page_buffer_size(fapl, sizeof(int) * 2000, 0, 0) < 0)
+ TEST_ERROR;
+
+ if (vfd_swmr_mode && swmr_fapl_augment(fapl, filename, max_lag) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5VL_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* The objective is to perform a quick smoke check on I/O of metadata
+ * entries that have been sub-allocated out of a larger space allocation.
+ * We do this by simulating a structure similar to elements of the
+ * fixed array on disk structure. Specifically, we create a synthetic
+ * set of metadata entries that are allocated out of a single allocation
+ * from the free space manager, and perform several reads and writes to
+ * verify expected behaviour.
+ *
+ * The synthetic set of metadata entries are constucted of integers
+ * so as to allow easy assignement of unique values. It is constructed
+ * as follows:
+ *
+ * size values: addr:
+ * (ints)
+ *
+ * header: 40 0, 1, ... 39 base_addr
+ * page 0: 250 1040, 1041, ... 1289 base_addr + 40 * sizeof(int)
+ * page 1: 250 2290, 2291, ... 2539 base_addr + 290 * sizeof(int)
+ * page 2: 250 3540, 3541, ... 3789 base_addr + 540 * sizeof(int)
+ *
+ * The overall size of the compound metadata entry is 395 * sizeof(int).
+ * Since we use a page size of 100 * sizeof(int), this system of synthetic
+ * metadata entries spans four pages.
+ */
+
+ /* allocate the buffers needed for the synthetic md entry test */
+ if ( (synth_md_vals = (int *)HDcalloc((size_t)TOT_SYNTH_ENTRY_SIZES,
+ sizeof(int))) == NULL )
+ TEST_ERROR
+
+ if ( (synth_md_test_buf = (int *)HDcalloc((size_t)TOT_SYNTH_ENTRY_SIZES,
+ sizeof(int))) == NULL )
+ TEST_ERROR
+
+ /* allocate file space for the synthetic metadata entries and
+ * compute their addresses.
+ */
+ if (HADDR_UNDEF ==
+ (base_addr = H5MF_alloc(f, H5FD_MEM_BTREE,
+ sizeof(int) * (size_t)(TOT_SYNTH_ENTRY_SIZES))))
+ FAIL_STACK_ERROR;
+
+ p0_addr = base_addr + (haddr_t)(sizeof(int) * HDR_SIZE);
+ p1_addr = p0_addr + (haddr_t)(sizeof(int) * MD_PAGE_SIZE);
+ p2_addr = p1_addr + (haddr_t)(sizeof(int) * MD_PAGE_SIZE);
+
+
+ /* Set all cells in synth_md_vals[] to -1 and write directly to
+ * the underlying file via an H5FD call. This gives us a known
+ * set of values in the underlying file.
+ */
+ for ( i = 0; i < TOT_SYNTH_ENTRY_SIZES; i++) {
+
+ synth_md_vals[i] = -1;
+ }
+
+ if ( H5FD_write(f->shared->lf, H5FD_MEM_BTREE, base_addr,
+ sizeof(int) * TOT_SYNTH_ENTRY_SIZES, synth_md_vals) < 0)
+ FAIL_STACK_ERROR;
+
+ /* touch up the metadata cache so that it will report that a metadata
+ * entry that was sub-allocated out of a larger file space allocation
+ * is the source of the current metadata I/O operation.
+ */
+ H5C_set_curr_io_type_splitable(f->shared->cache, TRUE);
+
+ /* initialize the buffer with the values of the synthetic metadata
+ * entries.
+ */
+ for ( i = 0; i < TOT_SYNTH_ENTRY_SIZES; i++ ) {
+
+ synth_md_vals[i] = i;
+
+ if ( i > HDR_SIZE ) {
+ synth_md_vals[i] += 1000;
+ }
+
+ if ( i > HDR_SIZE + MD_PAGE_SIZE ) {
+ synth_md_vals[i] += 1000;
+ }
+
+ if ( i > HDR_SIZE + MD_PAGE_SIZE + MD_PAGE_SIZE ) {
+ synth_md_vals[i] += 1000;
+ }
+
+ }
+
+ /* write the header */
+ if (H5F_block_write(f, H5FD_MEM_BTREE, base_addr,
+ sizeof(int) * (size_t)HDR_SIZE, synth_md_vals) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read the header */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, base_addr,
+ sizeof(int) * (size_t)HDR_SIZE, synth_md_test_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* write page 0 */
+ if (H5F_block_write(f, H5FD_MEM_BTREE, p0_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_vals[HDR_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read page 0 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p0_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* write page 1 */
+ if (H5F_block_write(f, H5FD_MEM_BTREE, p1_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_vals[HDR_SIZE + MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read page 1 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p1_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE + MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* write page 2 */
+ if (H5F_block_write(f, H5FD_MEM_BTREE, p2_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_vals[HDR_SIZE + 2 * MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read page 2 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p2_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE + 2 * MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* verify reads */
+ for ( i = 0; i < TOT_SYNTH_ENTRY_SIZES; i++ ) {
+
+ if ( synth_md_vals[i] != synth_md_test_buf[i] ) {
+
+ HDfprintf(stderr, "(1) unexpected read %d: val %d -- %d expected\n",
+ i, synth_md_test_buf[i], synth_md_vals[i]);
+ TEST_ERROR;
+ }
+ }
+
+ /* zero the test buffer, do the reads again in reverse order, and verify */
+
+ for ( i = 0; i < TOT_SYNTH_ENTRY_SIZES; i++) {
+
+ synth_md_test_buf[i] = 0;
+ }
+
+ /* read page 2 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p2_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE + 2 * MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read page 1 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p1_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE + MD_PAGE_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read page 0 */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, p0_addr,
+ sizeof(int) * (size_t)MD_PAGE_SIZE,
+ &(synth_md_test_buf[HDR_SIZE])) < 0)
+ FAIL_STACK_ERROR;
+
+ /* read the header */
+ if (H5F_block_read(f, H5FD_MEM_BTREE, base_addr,
+ sizeof(int) * (size_t)HDR_SIZE, synth_md_test_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* verify reads again */
+ for ( i = 0; i < TOT_SYNTH_ENTRY_SIZES; i++ ) {
+
+ if ( synth_md_vals[i] != synth_md_test_buf[i] ) {
+
+ HDfprintf(stderr, "(2) unexpected read %d: val %d -- %d expected\n",
+ i, synth_md_test_buf[i], synth_md_vals[i]);
+ TEST_ERROR;
+ }
+ }
+
+ /* Undo the touchup of the metadata cache */
+ H5C_set_curr_io_type_splitable(f->shared->cache, FALSE);
+
+ /* free the test buffers */
+ HDfree(synth_md_vals);
+ HDfree(synth_md_test_buf);
+
+ if (H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if (H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if (H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+
+ /* Undo the touchup of the metadata cache */
+ if ( ( f ) && ( f->shared ) && ( f->shared->cache) )
+ H5C_set_curr_io_type_splitable(f->shared->cache, FALSE);
+
+ if ( synth_md_vals )
+ HDfree(synth_md_vals);
+
+ if ( synth_md_test_buf )
+ HDfree(synth_md_test_buf);
+
+ H5E_BEGIN_TRY {
+ if (fapl != H5I_INVALID_HID)
+ H5Pclose(fapl);
+ if (fcpl != H5I_INVALID_HID)
+ H5Pclose(fcpl);
+ if (file_id != H5I_INVALID_HID)
+ H5Fclose(file_id);
+ } H5E_END_TRY;
+ return 1;
+
+} /* md_entry_splitting_smoke_check() */
+
+#undef HDR_SIZE
+#undef MD_PAGE_SIZE
+#undef TOT_SYNTH_ENTRY_SIZES
+
+
+/*************************************************************************
+ *
+ * Function: md_entry_splitting_boundary_test()
+ *
+ * Purpose: Test to verify that I/O request splitting performs as
+ * as expected in various boundary conditions.
+ *
+ * The above md_entry_splitting_smoke_check() was directed
+ * at verifying that the page buffer behaved as expected
+ * in something approaching a typical use case.
+ *
+ * This test is directed at verifying that entries are
+ * split correctly under a variety of conditions that
+ * are unlikely unless the user chooses at odd page size.
+ *
+ * Return: 0 if test is sucessful
+ * 1 if test fails
+ *
+ * Programmer: John Mainzer
+ * 4/12/20
+ *
+ * Changes: None.
+ *
+ *************************************************************************/
+
+
+static unsigned
+md_entry_splitting_boundary_test(hid_t orig_fapl, const char *env_h5_drvr,
+ bool vfd_swmr_mode)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ int64_t base_page_cnt;
+ int i;
+ H5F_t *f = NULL;
+ const uint32_t max_lag = 5;
+ size_t page_size = (size_t)512;
+ int pages_allocated = 32;
+ size_t alloc_size;
+ uint8_t * write_buf = NULL;
+ uint8_t * read_buf = NULL;
+ haddr_t base_addr = HADDR_UNDEF;
+ haddr_t first_page_addr = HADDR_UNDEF;
+ haddr_t start_addr = HADDR_UNDEF;
+ size_t test_len;
+
+ TESTING("%sMetadata Entry Splitting Boundary Test", \
+ vfd_swmr_mode ? "VFD SWMR " : "");
+
+ h5_fixname(namebase, orig_fapl, filename, sizeof(filename));
+
+ if ((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR
+
+ if (set_multi_split(env_h5_drvr, fapl, sizeof(int) * 200) != 0)
+ TEST_ERROR;
+
+ if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, 1) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_file_space_page_size(fcpl, page_size) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_page_buffer_size(fapl, 32 * page_size, 0, 0) < 0)
+ TEST_ERROR;
+
+ if (vfd_swmr_mode && swmr_fapl_augment(fapl, filename, max_lag) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5VL_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the number of pages inserted, and verify that it is the
+ * expected value.
+ */
+ base_page_cnt = f->shared->pb_ptr->curr_pages;
+ if (base_page_cnt != 1)
+ TEST_ERROR;
+
+ /* Test the folowing cases:
+ *
+ * 1) splittable md entry that is page aligned and exactly one
+ * page long.
+ *
+ * 2) splittable md entry that is page aligned and exactly two
+ * pages long
+ *
+ * 3) splittable md entry that is page aligned and is exactly one
+ * page and one byte long.
+ *
+ * 4) splittable md entry that is exactly one page and one byte
+ * long, and starts one byte before a page bundary.
+ *
+ * 5) splittable md entry that is exactly one page and two bytes
+ * long, and starts one byte before a page boundary.
+ *
+ * 6) splittable md entry that is two bytes long, and starts one
+ * byte before a page boundary.
+ *
+ * 7) splittable md entry that is page aligned and is exactly two
+ * pages and one byte long.
+ *
+ * 8) splittable md entry that is exactly two pages and one byte
+ * long, and starts one byte before a page bundary.
+ *
+ * 9) splittable md entry that is exactly two pages and two bytes
+ * long, and starts one byte before a page boundary.
+ *
+ */
+ alloc_size = page_size * (size_t)pages_allocated;
+
+ /* allocate the buffers needed for the synthetic md entry test */
+ if ((write_buf = (uint8_t *)HDcalloc(alloc_size, sizeof(uint8_t))) == NULL)
+ TEST_ERROR
+
+ if ((read_buf = (uint8_t *)HDcalloc(alloc_size, sizeof(uint8_t))) == NULL)
+ TEST_ERROR
+
+ /* allocate file space for the tests */
+ if (HADDR_UNDEF == (base_addr = H5MF_alloc(f, H5FD_MEM_SUPER, alloc_size)))
+ FAIL_STACK_ERROR;
+
+ /* Set all cells write_buf[] to 0 and write directly to
+ * the underlying file via an H5FD call. This gives us a known
+ * set of values in the underlying file.
+ */
+ for ( i = 0; i < (int)alloc_size; i++) {
+
+ write_buf[i] = 0;
+ }
+
+ if ( H5FD_write(f->shared->lf, H5FD_MEM_SUPER, base_addr,
+ alloc_size, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* touch up the metadata cache so that it will report that a metadata
+ * entry that was sub-allocated out of a larger file space allocation
+ * is the source of the current metadata I/O operation.
+ */
+ H5C_set_curr_io_type_splitable(f->shared->cache, TRUE);
+
+
+ /* 1) splittable md entry that is page aligned and exactly one
+ * page long.
+ *
+ * Should not register as a split I/O.
+ *
+ * Should log 4 metadata accesses.
+ * should log 3 metadata hits
+ * should log 1 metadata misses
+ * should log 1 metadata loads
+ * should log 1 metadata insertions
+ *
+ * Note that exposes an inefficiency in the page buffer, as page
+ * aligned I/O requests of exactly oen page in length really should
+ * bypass the page buffer.
+ *
+ * This should be fixed, but I am bypassing it for now.
+ *
+ * JRM -- 4/18/20
+ */
+ first_page_addr = base_addr;
+ start_addr = base_addr;
+ test_len = page_size;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 1;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "1.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 2;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "1.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 0 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 0 ) )
+ TEST_ERROR;
+
+ if ( ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 1 ) )
+ TEST_ERROR;
+
+
+ /* 2) splittable md entry that is page aligned and exactly two
+ * pages long
+ *
+ * Should not register as a split I/O.
+ *
+ * if vfd_swmr_mode
+ *
+ * Should log 0 multi-page metadata bypasses.
+ * Should log 4 multi-page metadata accesses.
+ * should log 3 multi-page metadata hits
+ * should log 1 multi-page metadata misses
+ * should log 0 multi-page metadata loads
+ * should log 1 multi-page metadata insertions
+ *
+ * else
+ *
+ * Should log 4 multi-page metadata bypasses.
+ * Should log 0 multi-page metadata accesses.
+ * should log 0 multi-page metadata hits
+ * should log 2 multi-page metadata misses
+ * should log 0 multi-page metadata loads
+ * should log 0 multi-page metadata insertions
+ *
+ * The misses in the normal operating mode could be avoided.
+ */
+ first_page_addr = base_addr + (haddr_t)(page_size);
+ start_addr = first_page_addr;
+ test_len = 3 * page_size;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 3;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "2.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 4;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "2.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 0 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 0 ) )
+ TEST_ERROR;
+
+ if ( vfd_swmr_mode ) {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MPMDE] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MPMDE] != 1 ) )
+ TEST_ERROR;
+
+ } else {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MPMDE] != 0 ) )
+ TEST_ERROR;
+ }
+
+
+ /* 3) splittable md entry that is page aligned and is exactly one
+ * page and one byte long.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * Should log 0 metadata bypasses.
+ * Should log 8 metadata accesses.
+ * should log 6 metadata hits
+ * should log 2 metadata misses
+ * should log 2 metadata loads
+ * should log 2 metadata insertions
+ */
+ first_page_addr = base_addr + (haddr_t)(3 * page_size);
+ start_addr = first_page_addr;
+ test_len = page_size + 1;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 5;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "3.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 6;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "3.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MD] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 8 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 6 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 2 ) )
+ TEST_ERROR;
+
+
+ /* 4) splittable md entry that is exactly one page and one byte
+ * long, and starts one byte before a page bundary.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * Should log 0 metadata bypasses.
+ * Should log 8 metadata accesses.
+ * should log 6 metadata hits
+ * should log 2 metadata misses
+ * should log 2 metadata loads
+ * should log 2 metadata insertions
+ *
+ */
+ first_page_addr = base_addr + (haddr_t)(5 * page_size);
+ start_addr = first_page_addr + (haddr_t)(page_size - 1);;
+ test_len = page_size + 1;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 7;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if ( f->shared->pb_ptr->md_write_splits != 1 )
+ TEST_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if ( f->shared->pb_ptr->md_read_splits != 1 )
+ TEST_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "4.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 8;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "4.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MD] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 8 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 6 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 2 ) )
+ TEST_ERROR;
+
+
+ /* 5) splittable md entry that is exactly one page and two bytes
+ * long, and starts one byte before a page boundary.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * Should log 0 metadata bypasses.
+ * Should log 12 metadata accesses.
+ * should log 9 metadata hits
+ * should log 3 metadata misses
+ * should log 3 metadata loads
+ * should log 3 metadata insertions
+ */
+ first_page_addr = base_addr + (haddr_t)(8 * page_size);
+ start_addr = first_page_addr + (haddr_t)(page_size - 1);;
+ test_len = page_size + 2;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 9;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "5.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 10;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "5.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MD] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 12 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 9 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 3 ) )
+ TEST_ERROR;
+
+
+ /* 6) splittable md entry that is two bytes long, and starts one
+ * byte before a page boundary.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * Should log 0 metadata bypasses.
+ * Should log 8 metadata accesses.
+ * should log 6 metadata hits
+ * should log 2 metadata misses
+ * should log 2 metadata loads
+ * should log 2 metadata insertions
+ */
+ first_page_addr = base_addr + (haddr_t)(11 * page_size);
+ start_addr = first_page_addr + (haddr_t)(page_size - 1);;
+ test_len = 2;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 11;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "6.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 12;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "6.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MD] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 8 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 6 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 2 ) )
+ TEST_ERROR;
+
+ /* 7) splittable md entry that is page aligned and is exactly two
+ * pages and one byte long.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * if vfd_swmr_mode
+ *
+ * Should log 0 multi-page metadata bypasses.
+ * Should log 4 multi-page metadata accesses.
+ * Should log 4 metadata accesses.
+ * should log 3 multi-page metadata hits
+ * should log 3 metadata hits
+ * should log 1 multi-page metadata misses
+ * should log 1 metadata misses
+ * should log 0 multi-page metadata loads
+ * should log 1 metadata loads
+ * should log 1 multi-page metadata insertions
+ * should log 1 metadata insertions
+ *
+ * else
+ *
+ * Should log 4 multi-page metadata bypasses.
+ * Should log 4 metadata accesses.
+ * should log 3 metadata hits
+ * should log 2 multi-page metadata misses
+ * should log 1 metadata misses
+ * should log 1 metadata loads
+ * should log 1 metadata insertions
+ *
+ * The misses in the normal operating mode could be avoided.
+ */
+ first_page_addr = base_addr + (haddr_t)(13 * page_size);
+ start_addr = first_page_addr;
+ test_len = 2 * page_size + 1;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 13;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "3.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 14;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "3.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( vfd_swmr_mode ) {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MPMDE] != 3 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 1 ) )
+ TEST_ERROR;
+
+ } else {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 2 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 1 ) )
+ TEST_ERROR;
+ }
+
+
+ /* 8) splittable md entry that is exactly two pages and one byte
+ * long, and starts one byte before a page bundary.
+ *
+ * Should register 2 metadata read splits
+ * Should register 2 metadata write splits
+ *
+ * if vfd_swmr_mode
+ *
+ * Should log 0 multi-page metadata bypasses.
+ * Should log 4 multi-page metadata accesses.
+ * Should log 4 metadata accesses.
+ * should log 3 multi-page metadata hits
+ * should log 3 metadata hits
+ * should log 1 multi-page metadata misses
+ * should log 1 metadata misses
+ * should log 0 multi-page metadata loads
+ * should log 1 metadata loads
+ * should log 1 multi-page metadata insertions
+ * should log 1 metadata insertions
+ *
+ * else
+ *
+ * Should log 4 multi-page metadata bypasses.
+ * Should log 4 metadata accesses.
+ * should log 3 metadata hits
+ * should log 2 multi-page metadata misses
+ * should log 1 metadata misses
+ * should log 1 metadata loads
+ * should log 1 metadata insertions
+ *
+ * The misses in the normal operating mode could be avoided.
+ */
+ first_page_addr = base_addr + (haddr_t)(16 * page_size);
+ start_addr = first_page_addr + (haddr_t)(page_size - 1);;
+ test_len =2 * page_size + 1;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 15;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if ( f->shared->pb_ptr->md_write_splits != 1 )
+ TEST_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if ( f->shared->pb_ptr->md_read_splits != 1 )
+ TEST_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "4.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 16;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "4.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( vfd_swmr_mode ) {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MPMDE] != 3 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 1 ) )
+ TEST_ERROR;
+
+ } else {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 4 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 3 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 2 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 1 ) )
+ TEST_ERROR;
+ }
+
+
+ /* 9) splittable md entry that is exactly two pages and two bytes
+ * long, and starts one byte before a page boundary.
+ *
+ * if vfd_swmr_mode
+ *
+ * Should log 0 multi-page metadata bypasses.
+ * Should log 4 multi-page metadata accesses.
+ * Should log 8 metadata accesses.
+ * should log 3 multi-page metadata hits
+ * should log 6 metadata hits
+ * should log 1 multi-page metadata misses
+ * should log 2 metadata misses
+ * should log 0 multi-page metadata loads
+ * should log 2 metadata loads
+ * should log 1 multi-page metadata insertions
+ * should log 2 metadata insertions
+ *
+ * else
+ *
+ * Should log 4 multi-page metadata bypasses.
+ * Should log 4 metadata accesses.
+ * should log 3 metadata hits
+ * should log 2 multi-page metadata misses
+ * should log 1 metadata misses
+ * should log 1 metadata loads
+ * should log 1 metadata insertions
+ *
+ * The misses in the normal operating mode could be avoided.
+ */
+ first_page_addr = base_addr + (haddr_t)(19 * page_size);
+ start_addr = first_page_addr + (haddr_t)(page_size - 1);;
+ test_len = 2 * page_size + 2;
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 17;
+
+ if ( H5PB_reset_stats(f->shared->pb_ptr) < 0 )
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "5.1) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ for ( i = 0; i < (int)test_len; i++ )
+ write_buf[i] = 18;
+
+ if (H5F_block_write(f, H5FD_MEM_SUPER, start_addr, test_len, write_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5F_block_read(f, H5FD_MEM_SUPER, start_addr, test_len, read_buf) < 0)
+ FAIL_STACK_ERROR;
+
+ for ( i = 0; i < (int)test_len; i++ ) {
+ if ( write_buf[i] != read_buf[i] ) {
+ HDfprintf(stdout, "5.2) write_buf[%d] = %d != %d = read_buf[%d]\n",
+ i, (int)(write_buf[i]), (int)(read_buf[i]), i);
+ TEST_ERROR;
+ }
+ }
+
+ if ( ( f->shared->pb_ptr->md_read_splits != 2 ) ||
+ ( f->shared->pb_ptr->md_write_splits != 2 ) )
+ TEST_ERROR;
+
+ if ( vfd_swmr_mode ) {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 8 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MPMDE] != 3 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 6 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MPMDE] != 0 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MPMDE] != 1 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 2 ) )
+ TEST_ERROR;
+
+ } else {
+ if ( ( f->shared->pb_ptr->bypasses[H5PB__STATS_MPMDE] != 4 ) ||
+ ( f->shared->pb_ptr->accesses[H5PB__STATS_MD] != 8 ) ||
+ ( f->shared->pb_ptr->hits[H5PB__STATS_MD] != 6 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MPMDE] != 2 ) ||
+ ( f->shared->pb_ptr->misses[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->loads[H5PB__STATS_MD] != 2 ) ||
+ ( f->shared->pb_ptr->insertions[H5PB__STATS_MD] != 2 ) )
+ TEST_ERROR;
+ }
+
+
+ /* Undo the touchup of the metadata cache */
+ H5C_set_curr_io_type_splitable(f->shared->cache, FALSE);
+
+ /* free the test buffers */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ if (H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if (H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if (H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+
+ /* Undo the touchup of the metadata cache */
+ if ( ( f ) && ( f->shared ) && ( f->shared->cache) )
+ H5C_set_curr_io_type_splitable(f->shared->cache, FALSE);
+
+ if ( write_buf )
+ HDfree(write_buf);
+
+ if ( read_buf )
+ HDfree(read_buf);
+
+ H5E_BEGIN_TRY {
+ if (fapl != H5I_INVALID_HID)
+ H5Pclose(fapl);
+ if (fcpl != H5I_INVALID_HID)
+ H5Pclose(fcpl);
+ if (file_id != H5I_INVALID_HID)
+ H5Fclose(file_id);
+ } H5E_END_TRY;
+ return 1;
+
+} /* md_entry_splitting_boundary_test() */
+
+
+
/*-------------------------------------------------------------------------
* Function: main()
*
@@ -2991,7 +4392,7 @@ main(void)
* Page buffering depends on paged aggregation which is
* currently disabled for multi/split drivers.
*/
- if((0 == HDstrcmp(env_h5_drvr, "multi")) ||
+ if((0 == HDstrcmp(env_h5_drvr, "multi")) ||
(0 == HDstrcmp(env_h5_drvr, "split"))) {
SKIPPED()
@@ -3009,7 +4410,7 @@ main(void)
if(H5CX_push() < 0) FAIL_STACK_ERROR
api_ctx_pushed = TRUE;
-#ifdef H5_HAVE_PARALLEL
+#ifdef H5_HAVE_PARALLEL
HDputs("Page Buffering is disabled for parallel.");
nerrors += verify_page_buffering_disabled(fapl, env_h5_drvr);
@@ -3025,6 +4426,10 @@ main(void)
nerrors += test_lru_processing(fapl, env_h5_drvr);
nerrors += test_min_threshold(fapl, env_h5_drvr);
nerrors += test_stats_collection(fapl, env_h5_drvr);
+ nerrors += md_entry_splitting_smoke_check(fapl, env_h5_drvr, false);
+ nerrors += md_entry_splitting_smoke_check(fapl, env_h5_drvr, true);
+ nerrors += md_entry_splitting_boundary_test(fapl, env_h5_drvr, false);
+ nerrors += md_entry_splitting_boundary_test(fapl, env_h5_drvr, true);
#endif /* H5_HAVE_PARALLEL */
@@ -3052,4 +4457,5 @@ error:
if(api_ctx_pushed) H5CX_pop();
HDexit(EXIT_FAILURE);
-}
+
+} /* main() */