Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_cache.c      | 311
-rw-r--r--  testpar/t_mdset.c      |   1
-rw-r--r--  testpar/t_shapesame.c  |   2
-rw-r--r--  testpar/testphdf5.c    |   2
4 files changed, 315 insertions(+), 1 deletion(-)
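
This patch threads HDF5's collective metadata read property through the parallel test suite: t_cache.c gains smoke_check_6() and enables the property on its file access property lists, while the create_faccess_plist() helpers in t_shapesame.c and testphdf5.c request it for every plist they build. A minimal sketch of the underlying pattern, assuming a parallel HDF5 build that provides H5Pset_coll_metadata_read() as used in this patch; the helper name, file name, and error handling are illustrative only:

    #include "hdf5.h"
    #include <mpi.h>

    /* Open a file for parallel access with collective metadata reads on. */
    static hid_t
    open_with_coll_md_reads(const char *name)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t fid  = -1;

        if ( fapl < 0 )
            return(-1);

        /* parallel access over MPI_COMM_WORLD, then request collective
         * metadata reads on the same access property list */
        if ( ( H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL) >= 0 ) &&
             ( H5Pset_coll_metadata_read(fapl, 1) >= 0 ) )
            fid = H5Fopen(name, H5F_ACC_RDWR, fapl);

        H5Pclose(fapl);
        return(fid);
    }
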
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 18140ad..eb73fc3 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -7145,6 +7145,269 @@ trace_file_check(int metadata_write_strategy)
/*****************************************************************************
*
+ * Function: smoke_check_6()
+ *
+ * Purpose: Sixth smoke check for the parallel cache.
+ *
+ * Verify that entries inserted and protected while the
+ * collective metadata read property is enabled are marked
+ * as collective, and that entries accessed while it is
+ * disabled are not. Also check that collective entries
+ * stay below 80% of the maximum cache size.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 1/13/06
+ *
+ *****************************************************************************/
+static hbool_t
+smoke_check_6(int metadata_write_strategy)
+{
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ int min_count;
+ int max_count;
+ int min_idx;
+ int max_idx;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ struct mssg_t mssg;
+
+ switch ( metadata_write_strategy ) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if ( world_mpi_rank == 0 ) {
+ TESTING("smoke check #6 -- process 0 only md write strategy");
+ }
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if ( world_mpi_rank == 0 ) {
+ TESTING("smoke check #6 -- distributed md write strategy");
+ }
+ break;
+
+ default:
+ if ( world_mpi_rank == 0 ) {
+ TESTING("smoke check #6 -- unknown md write strategy");
+ }
+ break;
+ }
+
+ nerrors = 0;
+ init_data();
+ reset_stats();
+
+ if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ if ( ! server_main() ) {
+
+ /* some error occurred in the server -- report failure */
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ }
+ else /* run the clients */
+ {
+ int temp;
+
+ if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
+ metadata_write_strategy) ) {
+
+ nerrors++;
+ fid = -1;
+ cache_ptr = NULL;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ min_count = 100 / ((file_mpi_rank + 1) * (file_mpi_rank + 1));
+ max_count = min_count + 50;
+
+ temp = virt_num_data_entries;
+ virt_num_data_entries = NUM_DATA_ENTRIES;
+
+ /* insert the first half collectively */
+ file_ptr->coll_md_read = H5P_USER_TRUE;
+ for ( i = 0; i < virt_num_data_entries/2; i++ )
+ {
+ struct datum * entry_ptr;
+ entry_ptr = &(data[i]);
+
+ insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
+
+ if(TRUE != entry_ptr->header.coll_access) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Inserted entry not marked as collective.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ /* Make sure coll entries do not cross the 80% threshold */
+ HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ }
+
+ /* insert the other half independently */
+ file_ptr->coll_md_read = H5P_USER_FALSE;
+ for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
+ {
+ struct datum * entry_ptr;
+ entry_ptr = &(data[i]);
+
+ insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
+
+ if(FALSE != entry_ptr->header.coll_access) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Entry inserted independently is marked as collective.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ /* Make sure coll entries do not cross the 80% threshold */
+ HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ }
+
+ /* flush the file */
+ if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ /* Protect the first half of the entries collectively */
+ file_ptr->coll_md_read = H5P_USER_TRUE;
+ for ( i = 0; i < (virt_num_data_entries / 2); i++ )
+ {
+ struct datum * entry_ptr;
+ entry_ptr = &(data[i]);
+
+ lock_entry(file_ptr, i);
+
+ if(TRUE != entry_ptr->header.coll_access) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Protected entry not marked as collective.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ /* Make sure coll entries do not cross the 80% threshold */
+ HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ }
+
+ /* protect the other half independently */
+ file_ptr->coll_md_read = H5P_USER_FALSE;
+ for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
+ {
+ struct datum * entry_ptr;
+ entry_ptr = &(data[i]);
+
+ lock_entry(file_ptr, i);
+
+ if(FALSE != entry_ptr->header.coll_access) {
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: Entry protected independently is marked as collective.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
+ /* Make sure coll entries do not cross the 80% threshold */
+ HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ }
+
+ for ( i = 0; i < (virt_num_data_entries); i++ )
+ {
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ }
+
+ if ( fid >= 0 ) {
+
+ if ( ! take_down_cache(fid) ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ }
+
+ /* verify that all instances of datum are back where they started
+ * and are clean.
+ */
+
+ for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
+ {
+ HDassert( data_index[i] == i );
+ HDassert( ! (data[i].dirty) );
+ }
+
+ /* compose the done message */
+ mssg.req = DONE_REQ_CODE;
+ mssg.src = world_mpi_rank;
+ mssg.dest = world_server_mpi_rank;
+ mssg.mssg_num = -1; /* set by send function */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
+ mssg.magic = MSSG_MAGIC;
+
+ if ( success ) {
+
+
+ success = send_mssg(&mssg, FALSE);
+
+ if ( ! success ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ }
+ virt_num_data_entries = temp;
+ }
+
+ max_nerrors = get_max_nerrors();
+
+ if ( world_mpi_rank == 0 ) {
+
+ if ( max_nerrors == 0 ) {
+
+ PASSED();
+
+ } else {
+
+ failures++;
+ H5_FAILED();
+ }
+ }
+
+ success = ( ( success ) && ( max_nerrors == 0 ) );
+
+ return(success);
+
+} /* smoke_check_6() */
+
+
+/*****************************************************************************
+ *
* Function: main()
*
* Purpose: Main function for the parallel cache test.
@@ -7250,6 +7513,15 @@ main(int argc, char **argv)
}
}
+ if ( H5Pset_coll_metadata_read(fapl, 1) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_coll_metadata_read() failed 1.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+
/* fix the file names */
for ( u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u )
{
@@ -7294,6 +7566,14 @@ main(int argc, char **argv)
world_mpi_rank, FUNC);
}
}
+ if ( H5Pset_coll_metadata_read(fapl, 1) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_coll_metadata_read() failed 1.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
}
setup_rand();
@@ -7315,6 +7595,7 @@ main(int argc, char **argv)
#if 1
server_smoke_check();
#endif
+
#if 1
smoke_check_1(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
smoke_check_1(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
@@ -7323,6 +7604,18 @@ main(int argc, char **argv)
smoke_check_2(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
smoke_check_2(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
#endif
+ /* MSC - smoke checks 3 and 4 do independent reads, so we disable
+ the collective metadata read property here */
+ if ( world_mpi_rank != world_server_mpi_rank ) {
+ if ( H5Pset_coll_metadata_read(fapl, 0) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_coll_metadata_read() failed 1.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ }
#if 1
smoke_check_3(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
smoke_check_3(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
@@ -7331,10 +7624,27 @@ main(int argc, char **argv)
smoke_check_4(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
smoke_check_4(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
#endif
+ /* re-enable the collective metadata read property */
+ if ( world_mpi_rank != world_server_mpi_rank ) {
+ if ( H5Pset_coll_metadata_read(fapl, 1) < 0 ) {
+
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_coll_metadata_read() failed 1.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ }
#if 1
smoke_check_5(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
smoke_check_5(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
#endif
+
+#if 1
+ smoke_check_6(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ smoke_check_6(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+#endif
+
#if 1
trace_file_check(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
trace_file_check(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
@@ -7368,4 +7678,3 @@ finish:
/* cannot just return (failures) because exit code is limited to 1byte */
return(failures != 0);
}
-
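
In main() above, the collective metadata read property is switched off before smoke checks 3 and 4 (which perform independent reads) and switched back on before the later checks. A hedged sketch of that toggle, where run_independent_checks() and run_collective_checks() are hypothetical stand-ins for the smoke check calls:

    /* Illustrative only: disable collective metadata reads around test
     * phases that read independently, then restore the property. */
    if ( H5Pset_coll_metadata_read(fapl, 0) < 0 )
        nerrors++;                    /* could not disable the property */

    run_independent_checks(fapl);     /* e.g. smoke checks 3 and 4 */

    if ( H5Pset_coll_metadata_read(fapl, 1) < 0 )
        nerrors++;                    /* could not re-enable the property */

    run_collective_checks(fapl);      /* e.g. smoke checks 5 and 6 */
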
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index f294b93..f718f5b 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -888,6 +888,7 @@ void independent_group_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ H5Pset_coll_metadata_read(plist, 0);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
H5Pclose(plist);
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 9088470..24e88b0 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -4948,6 +4948,8 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_read(ret_pl, 1);
+ VRFY((ret >= 0), "");
return(ret_pl);
}
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index c3da73f..e2615b0 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -272,6 +272,8 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_read(ret_pl, 1);
+ VRFY((ret >= 0), "");
return(ret_pl);
}
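
With the t_shapesame.c and testphdf5.c changes, every file access property list built by create_faccess_plist() now requests collective metadata reads alongside MPI-IO access. A short usage sketch, assuming the test framework's FACC_MPIO flag and VRFY() macro; the file name is a placeholder:

    /* build a parallel fapl (now with collective metadata reads) and
     * create a test file with it */
    hid_t plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    hid_t fid   = H5Fcreate("ParaTest.h5", H5F_ACC_TRUNC, H5P_DEFAULT, plist);

    VRFY((fid >= 0), "H5Fcreate succeeded");
    H5Pclose(plist);

    /* ... run the test ... */

    H5Fclose(fid);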