Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeVFDTests.cmake   |  23
-rw-r--r--  testpar/COPYING               |  13
-rw-r--r--  testpar/t_bigio.c             | 101
-rw-r--r--  testpar/t_cache.c             | 475
-rw-r--r--  testpar/t_coll_chunk.c        |   5
-rw-r--r--  testpar/t_dset.c              | 113
-rw-r--r--  testpar/t_file.c              |  59
-rw-r--r--  testpar/t_filters_parallel.c  | 415
-rw-r--r--  testpar/t_filters_parallel.h  |  20
-rw-r--r--  testpar/t_mdset.c             | 141
-rw-r--r--  testpar/t_mpi.c               |   4
-rw-r--r--  testpar/t_pflush1.c           |   4
-rw-r--r--  testpar/t_pflush2.c           |   4
-rw-r--r--  testpar/t_prop.c              |  20
-rw-r--r--  testpar/t_span_tree.c         |   5
-rw-r--r--  testpar/t_subfiling_vfd.c     |   4
-rw-r--r--  testpar/testphdf5.c           |   5
-rw-r--r--  testpar/testphdf5.h           |  13
18 files changed, 880 insertions(+), 544 deletions(-)
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
index 32f9337..785c08d 100644
--- a/testpar/CMakeVFDTests.cmake
+++ b/testpar/CMakeVFDTests.cmake
@@ -23,6 +23,8 @@ set (VFD_LIST
split
multi
family
+ splitter
+ #log - log VFD currently has file space allocation bugs
)
set (H5P_VFD_TESTS
@@ -30,9 +32,24 @@ set (H5P_VFD_TESTS
t_pflush2
)
-if (DIRECT_VFD)
+if (H5_HAVE_DIRECT)
set (VFD_LIST ${VFD_LIST} direct)
endif ()
+if (H5_HAVE_PARALLEL)
+ set (VFD_LIST ${VFD_LIST} mpio)
+endif ()
+if (H5_HAVE_MIRROR_VFD)
+ set (VFD_LIST ${VFD_LIST} mirror)
+endif ()
+if (H5_HAVE_ROS3_VFD)
+ set (VFD_LIST ${VFD_LIST} ros3)
+endif ()
+if (H5_HAVE_LIBHDFS)
+ set (VFD_LIST ${VFD_LIST} hdfs)
+endif ()
+if (H5_HAVE_WINDOWS)
+ set (VFD_LIST ${VFD_LIST} windows)
+endif ()
foreach (vfdtest ${VFD_LIST})
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}")
@@ -58,9 +75,9 @@ macro (ADD_VFD_TEST vfdname resultcode)
WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
)
endforeach ()
- set_tests_properties (MPI_TEST_VFD-${vfdname}-pflush1 PROPERTIES WILL_FAIL "true")
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTIES WILL_FAIL "true")
#set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
- set_tests_properties (MPI_TEST_VFD-${vfdname}-pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-pflush1)
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-t_pflush1)
endif ()
endmacro ()
diff --git a/testpar/COPYING b/testpar/COPYING
deleted file mode 100644
index 97969da..0000000
--- a/testpar/COPYING
+++ /dev/null
@@ -1,13 +0,0 @@
-
- Copyright by The HDF Group and
- The Board of Trustees of the University of Illinois.
- All rights reserved.
-
- The files and subdirectories in this directory are part of HDF5.
- The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the COPYING file
- which can be found at the root of the source code distribution tree
- or in https://www.hdfgroup.org/licenses. If you do
- not have access to either file, you may request a copy from
- help@hdfgroup.org.
-
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 7884ecb..4389ea5 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1103,8 +1103,9 @@ dataset_big_read(void)
static void
single_rank_independent_io(void)
{
+#if HDF5_BIG_SELECTION_IO_WORKS
if (mpi_rank_g == 0)
- HDprintf("single_rank_independent_io\n");
+ HDprintf("\nSingle Rank Independent I/O\n");
if (MAIN_PROCESS) {
hsize_t dims[] = {LARGE_DIM};
@@ -1155,6 +1156,7 @@ single_rank_independent_io(void)
HDremove(FILENAME[1]);
}
MPI_Barrier(MPI_COMM_WORLD);
+#endif
}
/*
@@ -1223,8 +1225,6 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1250,7 +1250,7 @@ coll_chunk1(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk1\n");
+ HDprintf("\nCollective chunk I/O Test #1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1303,7 +1303,7 @@ coll_chunk2(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk2\n");
+ HDprintf("\nCollective chunk I/O Test #2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1329,8 +1329,6 @@ coll_chunk2(void)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1357,7 +1355,7 @@ coll_chunk3(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk3\n");
+ HDprintf("\nCollective chunk I/O Test #3\n");
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1385,17 +1383,9 @@ coll_chunk3(void)
*
* Failure: -1
*
- * Modifications:
- * Remove invalid temporary property checkings for API_LINK_HARD and
- * API_LINK_TRUE cases.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
- *
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1845,55 +1835,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
HDfree(data_origin1);
}
-/*****************************************************************************
- *
- * Function: do_express_test()
- *
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
- *
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
- *
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
- *
- * Failure: -1
- *
- * Programmer: JRM -- 4/25/06
- *
- *****************************************************************************/
-static int
-do_express_test(int world_mpi_rank)
-{
- int express_test;
- int max_express_test;
- int result;
-
- express_test = GetTestExpress();
-
- result =
- MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
-
- if (result != MPI_SUCCESS) {
- nerrors++;
- max_express_test = -1;
- if (VERBOSE_MED && (world_mpi_rank == 0)) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
- }
- }
-
- return (max_express_test);
-
-} /* do_express_test() */
-
int
main(int argc, char **argv)
{
- int ExpressMode = 0;
- hsize_t newsize = 1048576;
+ hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
@@ -1902,9 +1847,8 @@ main(int argc, char **argv)
* that we try to ensure that our bigio handling is actually
* envoked and tested.
*/
- if (newsize != oldsize) {
+ if (newsize != oldsize)
bigcount = newsize * 2;
- }
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
@@ -1915,14 +1859,11 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("Failed to turn off atexit processing. Continue.\n");
- };
/* set alarm. */
- ALARM_ON;
-
- ExpressMode = do_express_test(mpi_rank_g);
+ TestAlarmOn();
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
@@ -1930,22 +1871,16 @@ main(int argc, char **argv)
dataset_big_read();
MPI_Barrier(MPI_COMM_WORLD);
- if (ExpressMode > 0) {
- if (mpi_rank_g == 0)
- HDprintf("***Express test mode on. Several tests are skipped\n");
- }
- else {
- coll_chunk1();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk2();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk3();
- MPI_Barrier(MPI_COMM_WORLD);
- single_rank_independent_io();
- }
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+ single_rank_independent_io();
/* turn off alarm */
- ALARM_OFF;
+ TestAlarmOff();
if (mpi_rank_g == 0)
HDremove(FILENAME[0]);
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 4846a75..8696092 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -577,7 +577,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
}
}
@@ -594,7 +594,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
}
}
@@ -609,8 +609,8 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank, FUNC,
- mpi_result);
+ HDfprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank,
+ __func__, mpi_result);
}
}
else {
@@ -622,7 +622,8 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank,
+ __func__);
}
}
}
@@ -635,7 +636,8 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank,
+ __func__);
}
}
}
@@ -651,7 +653,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
}
}
@@ -666,7 +668,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
}
}
@@ -832,7 +834,7 @@ do_express_test(void)
nerrors++;
max_express_test = -1;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, __func__);
}
}
@@ -880,7 +882,7 @@ do_sync(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -891,7 +893,7 @@ do_sync(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else if ((mssg.req != SYNC_ACK_CODE) || (mssg.src != world_server_mpi_rank) ||
@@ -899,7 +901,7 @@ do_sync(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n", world_mpi_rank, __func__);
}
}
}
@@ -936,7 +938,7 @@ get_max_nerrors(void)
nerrors++;
max_nerrors = -1;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, __func__);
}
}
@@ -983,7 +985,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n", world_mpi_rank, __func__);
}
}
else {
@@ -1000,7 +1002,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n", world_mpi_rank, __func__);
}
}
else if (mssg_ptr->magic != MSSG_MAGIC) {
@@ -1008,7 +1010,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, __func__);
}
}
else if (mssg_ptr->src != status.MPI_SOURCE) {
@@ -1016,7 +1018,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", world_mpi_rank, __func__);
}
}
}
@@ -1061,7 +1063,7 @@ send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1081,7 +1083,7 @@ send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -1130,7 +1132,7 @@ setup_derived_types(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -1150,7 +1152,7 @@ setup_derived_types(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -1164,7 +1166,7 @@ setup_derived_types(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -1200,7 +1202,7 @@ takedown_derived_types(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", world_mpi_rank, __func__);
}
}
@@ -1253,7 +1255,7 @@ reset_server_counters(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, __func__,
actual_total_reads, total_reads);
}
}
@@ -1263,7 +1265,7 @@ reset_server_counters(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, __func__,
actual_total_writes, total_writes);
}
}
@@ -1311,7 +1313,7 @@ server_main(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", world_mpi_rank, __func__);
}
}
@@ -1328,7 +1330,7 @@ server_main(void)
case WRITE_REQ_ACK_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received write ack?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received write ack?!?.\n", __func__);
break;
case READ_REQ_CODE:
@@ -1338,7 +1340,7 @@ server_main(void)
case READ_REQ_REPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received read req reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received read req reply?!?.\n", __func__);
break;
case SYNC_REQ_CODE:
@@ -1348,7 +1350,7 @@ server_main(void)
case SYNC_ACK_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received sync ack?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received sync ack?!?.\n", __func__);
break;
case REQ_TTL_WRITES_CODE:
@@ -1358,7 +1360,7 @@ server_main(void)
case REQ_TTL_WRITES_RPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received total writes reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received total writes reply?!?.\n", __func__);
break;
case REQ_TTL_READS_CODE:
@@ -1368,7 +1370,7 @@ server_main(void)
case REQ_TTL_READS_RPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received total reads reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received total reads reply?!?.\n", __func__);
break;
case REQ_ENTRY_WRITES_CODE:
@@ -1378,7 +1380,7 @@ server_main(void)
case REQ_ENTRY_WRITES_RPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", __func__);
break;
case REQ_ENTRY_READS_CODE:
@@ -1388,7 +1390,7 @@ server_main(void)
case REQ_ENTRY_READS_RPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", __func__);
break;
case REQ_RW_COUNT_RESET_CODE:
@@ -1398,7 +1400,7 @@ server_main(void)
case REQ_RW_COUNT_RESET_RPLY_CODE:
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", FUNC);
+ HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", __func__);
break;
case DONE_REQ_CODE:
@@ -1411,7 +1413,7 @@ server_main(void)
nerrors++;
success = FALSE;
if (verbose)
- HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, __func__);
break;
}
}
@@ -1453,7 +1455,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1467,7 +1469,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
}
}
@@ -1476,7 +1478,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__,
data[target_index].len, mssg_ptr->len);
}
}
@@ -1488,7 +1490,8 @@ serve_read_request(struct mssg_t *mssg_ptr)
HDfprintf(stdout,
"%d:%s: proc %d read invalid entry. "
"idx/base_addr = %d/%" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, mssg_ptr->src, target_index, data[target_index].base_addr);
+ world_mpi_rank, __func__, mssg_ptr->src, target_index,
+ data[target_index].base_addr);
}
}
else {
@@ -1568,7 +1571,7 @@ serve_sync_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1642,7 +1645,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1656,7 +1659,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
}
}
@@ -1665,7 +1668,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__,
data[target_index].len, mssg_ptr->len);
}
}
@@ -1681,8 +1684,8 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", world_mpi_rank, FUNC, new_ver_num,
- data[target_index].ver);
+ HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", world_mpi_rank, __func__,
+ new_ver_num, data[target_index].ver);
}
}
}
@@ -1767,7 +1770,7 @@ serve_total_writes_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1837,7 +1840,7 @@ serve_total_reads_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1909,7 +1912,7 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -1923,7 +1926,7 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
}
}
@@ -1998,7 +2001,7 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -2012,7 +2015,7 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
}
}
@@ -2084,7 +2087,7 @@ serve_rw_count_reset_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
}
@@ -2436,7 +2439,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
@@ -2447,7 +2450,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -2462,7 +2465,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, __func__);
}
#if 0 /* This has been useful debugging code -- keep it for now. */
@@ -2470,35 +2473,35 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
- world_mpi_rank, FUNC, (int)(mssg.req));
+ world_mpi_rank, __func__, (int)(mssg.req));
}
if ( mssg.src != world_server_mpi_rank ) {
HDfprintf(stdout,
"%d:%s: mssg.src != world_server_mpi_rank.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
if ( mssg.dest != world_mpi_rank ) {
HDfprintf(stdout,
"%d:%s: mssg.dest != world_mpi_rank.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
if ( mssg.base_addr != entry_ptr->base_addr ) {
HDfprintf(stdout,
"%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
HDfprintf(stdout, "%d:%s: mssg.base_addr = %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, mssg.base_addr);
+ world_mpi_rank, __func__, mssg.base_addr);
HDfprintf(stdout,
"%d:%s: entry_ptr->base_addr = %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, __func__,
entry_ptr->base_addr);
}
@@ -2506,22 +2509,22 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.len != entry_ptr->len.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
HDfprintf(stdout, "%d:%s: mssg.len = %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, mssg.len);
+ world_mpi_rank, __func__, mssg.len);
}
if ( mssg.ver < entry_ptr->ver ) {
HDfprintf(stdout,
"%d:%s: mssg.ver < entry_ptr->ver.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
if ( mssg.magic != MSSG_MAGIC ) {
HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
#endif /* JRM */
}
@@ -2554,7 +2557,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
ret_value = FAIL;
HDfprintf(stdout, "%d:%s: Flushed dirty entry from non-zero file process.", world_mpi_rank,
- FUNC);
+ __func__);
}
if (ret_value == SUCCEED) {
@@ -2581,7 +2584,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -2600,7 +2603,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else if ((mssg.req != WRITE_REQ_ACK_CODE) || (mssg.src != world_server_mpi_rank) ||
@@ -2611,7 +2614,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, __func__);
}
}
}
@@ -2717,7 +2720,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Unknown notify action.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Unknown notify action.\n", world_mpi_rank, __func__);
}
break;
}
@@ -2824,7 +2827,7 @@ expunge_entry(H5F_t *file_ptr, int32_t idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", world_mpi_rank, __func__);
}
}
@@ -2838,14 +2841,14 @@ expunge_entry(H5F_t *file_ptr, int32_t idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", world_mpi_rank, __func__);
}
}
else if (in_cache) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", world_mpi_rank, __func__);
}
}
}
@@ -2906,7 +2909,7 @@ insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", world_mpi_rank, __func__);
}
}
@@ -2927,8 +2930,8 @@ insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, FUNC, idx,
- (int)(data[idx].header.is_dirty));
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, __func__,
+ idx, (int)(data[idx].header.is_dirty));
}
}
}
@@ -3268,7 +3271,7 @@ lock_entry(H5F_t *file_ptr, int32_t idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", world_mpi_rank, __func__);
}
}
else {
@@ -3321,7 +3324,7 @@ mark_entry_dirty(int32_t idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: error in H5AC_mark_entry_dirty().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_mark_entry_dirty().\n", world_mpi_rank, __func__);
}
}
else if (!(entry_ptr->locked)) {
@@ -3432,7 +3435,7 @@ pin_protected_entry(int32_t idx, hbool_t global)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", world_mpi_rank, __func__);
}
}
@@ -3536,7 +3539,7 @@ move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -3560,7 +3563,7 @@ move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, __func__,
new_idx, (int)(data[new_idx].header.is_dirty));
}
}
@@ -3612,7 +3615,7 @@ reset_server_counts(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -3624,7 +3627,7 @@ reset_server_counts(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else if ((mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
@@ -3634,7 +3637,8 @@ reset_server_counts(void)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", world_mpi_rank,
+ __func__);
}
}
}
@@ -3685,7 +3689,7 @@ resize_entry(int32_t idx, size_t new_size)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -3750,13 +3754,13 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (fid < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", world_mpi_rank, __func__);
}
}
else if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -3766,7 +3770,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (file_ptr == NULL) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", world_mpi_rank, __func__);
}
}
else {
@@ -3776,13 +3780,13 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (cache_ptr == NULL) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", world_mpi_rank, __func__);
}
}
else if (cache_ptr->magic != H5C__H5C_T_MAGIC) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", world_mpi_rank, __func__);
}
}
else {
@@ -3800,7 +3804,8 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
- HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", world_mpi_rank,
+ __func__);
}
else {
@@ -3810,11 +3815,11 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
else if (enable_rpt_fcn) {
- HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n", world_mpi_rank, __func__);
}
}
}
@@ -3830,7 +3835,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", world_mpi_rank, __func__);
}
}
else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC) {
@@ -3838,7 +3843,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
}
else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy != metadata_write_strategy) {
@@ -3846,7 +3851,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
}
@@ -3861,7 +3866,8 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (H5AC_get_cache_auto_resize_config(cache_ptr, &test_config) != SUCCEED) {
- HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", world_mpi_rank,
+ __func__);
}
else if (test_config.metadata_write_strategy != metadata_write_strategy) {
@@ -3869,7 +3875,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (verbose) {
- HDfprintf(stdout, "%d:%s: unexpected metadata_write_strategy.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: unexpected metadata_write_strategy.\n", world_mpi_rank, __func__);
}
}
}
@@ -3889,7 +3895,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n", world_mpi_rank, __func__);
}
}
else if (actual_base_addr > BASE_ADDR) {
@@ -3902,7 +3908,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n", world_mpi_rank, __func__);
}
}
}
@@ -3913,7 +3919,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -3926,7 +3932,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5C_set_write_done_callback failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5C_set_write_done_callback failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -3940,7 +3946,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
}
@@ -3996,7 +4002,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
proceed = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4019,7 +4025,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
proceed = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4034,11 +4040,11 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
if (proceed) {
- HDfprintf(stdout, "%d:%s: verified %u writes.\n", world_mpi_rank, FUNC, num_writes);
+ HDfprintf(stdout, "%d:%s: verified %u writes.\n", world_mpi_rank, __func__, num_writes);
}
else {
- HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n", world_mpi_rank, FUNC, num_writes);
+ HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n", world_mpi_rank, __func__, num_writes);
}
}
@@ -4055,7 +4061,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
proceed = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4098,7 +4104,7 @@ setup_rand(void)
HDassert(world_mpi_rank < world_mpi_size);
seed = predefined_seeds[world_mpi_rank];
- HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", world_mpi_rank, FUNC, seed);
+ HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", world_mpi_rank, __func__, seed);
fflush(stdout);
HDsrand(seed);
}
@@ -4108,13 +4114,13 @@ setup_rand(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", world_mpi_rank, __func__);
}
}
else {
seed = (unsigned)tv.tv_usec;
if (verbose) {
- HDfprintf(stdout, "%d:%s: seed = %d.\n", world_mpi_rank, FUNC, seed);
+ HDfprintf(stdout, "%d:%s: seed = %d.\n", world_mpi_rank, __func__, seed);
fflush(stdout);
}
HDsrand(seed);
@@ -4154,7 +4160,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
@@ -4171,7 +4177,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
}
@@ -4182,7 +4188,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", world_mpi_rank, __func__);
}
}
@@ -4198,7 +4204,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: HDremove() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: HDremove() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4260,7 +4266,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4272,7 +4278,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4286,7 +4292,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", world_mpi_rank, __func__);
}
}
else {
@@ -4303,7 +4309,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
success = FALSE;
if (verbose) {
HDfprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n",
- world_mpi_rank, FUNC, addr, reported_entry_reads, expected_entry_reads);
+ world_mpi_rank, __func__, addr, reported_entry_reads, expected_entry_reads);
}
}
}
@@ -4357,7 +4363,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4369,7 +4375,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4383,7 +4389,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", world_mpi_rank, __func__);
}
}
else {
@@ -4400,7 +4406,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
success = FALSE;
if (verbose) {
HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n", world_mpi_rank,
- FUNC, (long long)addr, reported_entry_writes, expected_entry_writes);
+ __func__, (long long)addr, reported_entry_writes, expected_entry_writes);
}
}
}
@@ -4453,7 +4459,7 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4465,7 +4471,7 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else if ((mssg.req != REQ_TTL_READS_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
@@ -4475,7 +4481,7 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, __func__);
}
}
else {
@@ -4492,7 +4498,7 @@ verify_total_reads(int expected_total_reads)
success = FALSE;
if (verbose) {
HDfprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%d).\n", world_mpi_rank,
- FUNC, reported_total_reads, expected_total_reads);
+ __func__, reported_total_reads, expected_total_reads);
}
}
}
@@ -4545,7 +4551,7 @@ verify_total_writes(unsigned expected_total_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4557,7 +4563,7 @@ verify_total_writes(unsigned expected_total_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
else if ((mssg.req != REQ_TTL_WRITES_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
@@ -4567,7 +4573,7 @@ verify_total_writes(unsigned expected_total_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, __func__);
}
}
else {
@@ -4584,7 +4590,7 @@ verify_total_writes(unsigned expected_total_writes)
success = FALSE;
if (verbose) {
HDfprintf(stdout, "%d:%s: reported/expected total writes mismatch (%u/%u).\n", world_mpi_rank,
- FUNC, reported_total_writes, expected_total_writes);
+ __func__, reported_total_writes, expected_total_writes);
}
}
}
@@ -4646,7 +4652,7 @@ unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: error in H5AC_unprotect().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_unprotect().\n", world_mpi_rank, __func__);
}
}
else {
@@ -4730,7 +4736,7 @@ unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", world_mpi_rank, __func__);
}
}
}
@@ -4791,7 +4797,7 @@ server_smoke_check(void)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4812,7 +4818,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, __func__);
}
}
@@ -4827,7 +4833,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4843,7 +4849,7 @@ server_smoke_check(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, __func__);
}
}
}
@@ -4858,7 +4864,7 @@ server_smoke_check(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__);
}
}
@@ -4890,7 +4896,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, __func__);
}
}
@@ -4913,7 +4919,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, __func__);
}
}
}
@@ -4927,7 +4933,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -4943,7 +4949,7 @@ server_smoke_check(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, __func__);
}
}
}
@@ -4954,7 +4960,7 @@ server_smoke_check(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__);
}
}
@@ -4985,7 +4991,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 4 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 4 failed.\n", world_mpi_rank, __func__);
}
}
@@ -5001,7 +5007,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 5 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 5 failed.\n", world_mpi_rank, __func__);
}
}
@@ -5032,7 +5038,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 6 failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 6 failed.\n", world_mpi_rank, __func__);
}
}
@@ -5055,7 +5061,7 @@ server_smoke_check(void)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -5138,7 +5144,7 @@ smoke_check_1(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5150,7 +5156,7 @@ smoke_check_1(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5183,7 +5189,7 @@ smoke_check_1(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5216,7 +5222,7 @@ smoke_check_1(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -5302,7 +5308,7 @@ smoke_check_2(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5314,7 +5320,7 @@ smoke_check_2(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5381,7 +5387,7 @@ smoke_check_2(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5414,7 +5420,7 @@ smoke_check_2(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -5507,7 +5513,7 @@ smoke_check_3(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5519,7 +5525,7 @@ smoke_check_3(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5573,7 +5579,7 @@ smoke_check_3(int metadata_write_strategy)
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5666,7 +5672,7 @@ smoke_check_3(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5699,7 +5705,7 @@ smoke_check_3(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -5792,7 +5798,7 @@ smoke_check_4(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5804,7 +5810,7 @@ smoke_check_4(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5870,7 +5876,7 @@ smoke_check_4(int metadata_write_strategy)
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
@@ -5950,7 +5956,7 @@ smoke_check_4(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -5983,7 +5989,7 @@ smoke_check_4(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -6067,7 +6073,7 @@ smoke_check_5(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -6080,7 +6086,7 @@ smoke_check_5(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -6092,7 +6098,7 @@ smoke_check_5(int metadata_write_strategy)
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
@@ -6142,7 +6148,7 @@ smoke_check_5(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -6175,7 +6181,7 @@ smoke_check_5(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -6352,7 +6358,7 @@ trace_file_check(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose)
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
else {
@@ -6364,7 +6370,7 @@ trace_file_check(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose)
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
if (nerrors == 0) {
@@ -6374,7 +6380,7 @@ trace_file_check(int metadata_write_strategy)
if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
else {
config.open_trace_file = TRUE;
@@ -6383,7 +6389,7 @@ trace_file_check(int metadata_write_strategy)
if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
} /* end if */
@@ -6418,7 +6424,7 @@ trace_file_check(int metadata_write_strategy)
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose)
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
if (nerrors == 0) {
@@ -6427,7 +6433,7 @@ trace_file_check(int metadata_write_strategy)
if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
else {
config.open_trace_file = FALSE;
@@ -6437,7 +6443,7 @@ trace_file_check(int metadata_write_strategy)
if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
} /* end if */
@@ -6447,7 +6453,7 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if (verbose)
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
} /* end if */
@@ -6477,7 +6483,7 @@ trace_file_check(int metadata_write_strategy)
if (!success) {
nerrors++;
if (verbose)
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
} /* end if */
@@ -6488,7 +6494,7 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if (verbose)
- HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, __func__);
}
} /* end if */
@@ -6517,17 +6523,17 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
- FUNC, i);
+ __func__, i);
if (expected_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__,
"<EMPTY>", expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
- actual_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__,
+ buffer, actual_line_len);
}
if (actual_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__,
(*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__,
"<EMPTY>", actual_line_len);
}
}
@@ -6542,10 +6548,10 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
- FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ __func__, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__,
(*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__, buffer,
actual_line_len);
}
}
@@ -6563,10 +6569,10 @@ trace_file_check(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
- FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, FUNC,
+ __func__, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, __func__,
(*expected_output)[i]);
- HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, FUNC, tok);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, __func__, tok);
}
}
} /* end else */
@@ -6657,7 +6663,7 @@ smoke_check_6(int metadata_write_strategy)
/* some error occured in the server -- report failure */
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -6671,7 +6677,7 @@ smoke_check_6(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if (verbose) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__);
}
}
@@ -6690,7 +6696,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Entry inserted not marked as collective.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
@@ -6711,7 +6717,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
}
@@ -6723,7 +6729,7 @@ smoke_check_6(int metadata_write_strategy)
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
}
}
@@ -6739,7 +6745,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Entry protected not marked as collective.\n", world_mpi_rank,
- FUNC);
+ __func__);
}
}
@@ -6759,7 +6765,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, __func__);
}
}
@@ -6777,7 +6783,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
}
}
}
@@ -6810,7 +6816,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__);
}
}
}
@@ -6874,28 +6880,20 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
- };
+
H5open();
express_test = do_express_test();
-#if 0 /* JRM */
- express_test = 0;
-#endif /* JRM */
- if (express_test) {
-
+ if (express_test)
virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
- }
- else {
-
+ else
virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
- }
#ifdef H5_HAVE_MPE
- if (MAINPROCESS) {
+ if (MAINPROCESS)
HDprintf(" Tests compiled for MPE.\n");
- }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
@@ -6908,11 +6906,8 @@ main(int argc, char **argv)
}
if (mpi_size < 3) {
-
- if (MAINPROCESS) {
-
+ if (MAINPROCESS)
HDprintf(" Need at least 3 processes. Exiting.\n");
- }
goto finish;
}
@@ -6930,27 +6925,22 @@ main(int argc, char **argv)
/* setup file access property list with the world communicator */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, __func__);
}
if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, __func__);
}
/* fix the file names */
for (u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u) {
if (h5_fixname(FILENAME[u], fapl, filenames[u], sizeof(filenames[u])) == NULL) {
-
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, __func__);
break;
}
}
@@ -6958,9 +6948,8 @@ main(int argc, char **argv)
/* close the fapl before we set it up again */
if (H5Pclose(fapl) < 0) {
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, __func__);
}
/* now create the fapl again, excluding the server process. */
@@ -6969,32 +6958,25 @@ main(int argc, char **argv)
/* setup file access property list */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, __func__);
}
if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, FUNC);
- }
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, __func__);
}
}
setup_rand();
max_nerrors = get_max_nerrors();
-
if (max_nerrors != 0) {
/* errors in setup -- no point in continuing */
-
- if (world_mpi_rank == 0) {
-
+ if (world_mpi_rank == 0)
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
- }
goto finish;
}
@@ -7028,7 +7010,8 @@ main(int argc, char **argv)
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: H5Pset_all_coll_metadata_ops() failed 1.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5Pset_all_coll_metadata_ops() failed 1.\n", world_mpi_rank,
+ __func__);
}
}
}
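
Most of the t_cache.c hunks above are mechanical: the library-private FUNC macro in diagnostic messages is replaced by the standard C99 __func__ identifier, and single-statement if bodies lose their braces. A minimal sketch of the resulting pattern, with hypothetical world_mpi_rank/verbose globals standing in for the test's own (not taken from the patch):

    #include <stdio.h>

    /* Illustrative stand-ins for the test globals */
    static int world_mpi_rank = 0;
    static int verbose        = 1;

    static void
    report_failure_example(void)
    {
        /* __func__ is predefined by the compiler, so no project macro is needed */
        if (verbose)
            fprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__);
    }
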
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 651a392..29341d7 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -832,7 +832,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
VRFY((status >= 0), "dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if (facc_type == FACC_MPIO) {
+ /* Only check chunk optimization mode if selection I/O is not being used -
+ * selection I/O bypasses this IO mode decision - it's effectively always
+ * multi chunk currently */
+ if (facc_type == FACC_MPIO && !H5_use_selection_io_g) {
switch (api_option) {
case API_LINK_HARD:
status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 3e4a304..19caff5 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -1627,9 +1627,6 @@ extend_writeInd(void)
VRFY((mem_dataspace >= 0), "");
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
- /* Temporary turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
file_dataspace = H5Dget_space(dataset2);
@@ -1638,11 +1635,13 @@ extend_writeInd(void)
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dwrite failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
@@ -1911,20 +1910,19 @@ extend_readInd(void)
VRFY((dataset2 >= 0), "");
/* Try extend dataset1 which is open RDONLY. Should fail. */
- /* first turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
dims[0]++;
- ret = H5Dset_extent(dataset1, dims);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dset_extent failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Read dataset1 using BYROW pattern */
@@ -2209,9 +2207,6 @@ extend_writeAll(void)
}
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
- /* Temporary turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
file_dataspace = H5Dget_space(dataset2);
@@ -2220,11 +2215,13 @@ extend_writeAll(void)
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dwrite failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
@@ -2331,20 +2328,19 @@ extend_readAll(void)
VRFY((dataset2 >= 0), "");
/* Try extend dataset1 which is open RDONLY. Should fail. */
- /* first turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
dims[0]++;
- ret = H5Dset_extent(dataset1, dims);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dset_extent failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Read dataset1 using BYROW pattern */
@@ -3321,14 +3317,23 @@ test_actual_io_mode(int selection_mode)
/* Release some resources */
ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dxpl_write);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dxpl_read);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret = H5Sclose(mem_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Sclose(file_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
HDfree(buffer);
return;
}
@@ -3344,36 +3349,40 @@ void
actual_io_mode_tests(void)
{
int mpi_size = -1;
- int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank);
- test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+ /* Only run these tests if selection I/O is not being used - selection I/O
+ * bypasses this IO mode decision - it's effectively always multi chunk
+ * currently */
+ if (!H5_use_selection_io_g) {
+ test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
- /*
- * Test multi-chunk-io via proc_num threshold
- */
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
- /* The Multi Chunk Mixed test requires atleast three processes. */
- if (mpi_size > 2)
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
- else
- HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
+        /* The Multi Chunk Mixed test requires at least three processes. */
+ if (mpi_size > 2)
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+ else
+            HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- /*
- * Test multi-chunk-io via setting direct property
- */
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+ test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
- test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
- test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+ test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+ }
- test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
@@ -4112,13 +4121,15 @@ dataset_atomicity(void)
if (MAINPROCESS) {
fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
VRFY((fid >= 0), "H5Fopen succeeed");
- }
- /* should fail */
- ret = H5Fset_mpi_atomicity(fid, TRUE);
- VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ }
+ H5E_END_TRY
+ VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- if (MAINPROCESS) {
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
}
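
The t_dset.c hunks replace the manual H5Eget_auto2()/H5Eset_auto2() save-and-restore around calls that are expected to fail with the H5E_BEGIN_TRY / H5E_END_TRY macro pair, and add VRFY() checks on the cleanup calls. A minimal sketch of the expected-failure idiom, assuming an already-open dataset (dset_id, mem_space, file_space and buf are placeholder names, not identifiers from the patch):

    #include "hdf5.h"

    /* Suppress automatic error printing only around a call that must fail */
    static int
    expect_write_failure(hid_t dset_id, hid_t mem_space, hid_t file_space, const int *buf)
    {
        herr_t status;

        H5E_BEGIN_TRY
        {
            status = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buf);
        }
        H5E_END_TRY

        /* A non-negative return here means the write unexpectedly succeeded */
        return (status < 0) ? 0 : -1;
    }
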
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 9a99c1c..5348f4f 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -151,7 +151,10 @@ test_page_buffer_access(void)
VRFY((ret == 0), "");
/* This should fail because collective metadata writes are not supported with page buffering */
- H5E_BEGIN_TRY { file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); }
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ }
H5E_END_TRY;
VRFY((file_id < 0), "H5Fcreate failed");
@@ -945,3 +948,57 @@ test_file_properties(void)
VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
} /* end test_file_properties() */
+
+void
+test_delete(void)
+{
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ const char *filename = NULL;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */
+ herr_t ret; /* Generic return value */
+
+ filename = (const char *)GetTestParameters();
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
+
+ /* create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((SUCCEED == ret), "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VRFY((TRUE == is_hdf5), "H5Fis_accessible");
+
+ /* Delete the file */
+ ret = H5Fdelete(filename, fapl_id);
+ VRFY((SUCCEED == ret), "H5Fdelete");
+
+ /* Verify that the file is NO LONGER an HDF5 file */
+ /* This should fail since there is no file */
+ H5E_BEGIN_TRY
+ {
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ }
+ H5E_END_TRY;
+ VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible");
+
+ /* Release file-access plist */
+ ret = H5Pclose(fapl_id);
+ VRFY((SUCCEED == ret), "H5Pclose");
+
+} /* end test_delete() */
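
The new test_delete() exercises H5Fdelete() through the MPI-IO file access property list and then probes the deleted name with H5Fis_accessible(), whose htri_t result is three-valued: negative on error, zero for "exists but is not HDF5", positive for "accessible HDF5 file". A serial sketch of the same handling, with illustrative names only (filename and my_fapl are assumptions, not part of the patch):

    #include "hdf5.h"

    /* Delete a file through its VFD and confirm nothing is reachable at that name */
    static int
    delete_and_verify(const char *filename, hid_t my_fapl)
    {
        htri_t accessible;

        if (H5Fdelete(filename, my_fapl) < 0)
            return -1;

        H5E_BEGIN_TRY
        {
            accessible = H5Fis_accessible(filename, my_fapl);
        }
        H5E_END_TRY;

        /* A positive result would mean an HDF5 file still exists at that name */
        return (accessible > 0) ? -1 : 0;
    }
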
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 5153bce..78af0fb 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -62,6 +62,7 @@ static void test_write_filtered_dataset_single_no_selection(void);
static void test_write_filtered_dataset_all_no_selection(void);
static void test_write_filtered_dataset_point_selection(void);
static void test_write_filtered_dataset_interleaved_write(void);
+static void test_write_transformed_filtered_dataset_no_overlap(void);
static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void);
static void test_write_3d_filtered_dataset_no_overlap_same_pages(void);
static void test_write_3d_filtered_dataset_overlap(void);
@@ -79,6 +80,7 @@ static void test_read_filtered_dataset_single_no_selection(void);
static void test_read_filtered_dataset_all_no_selection(void);
static void test_read_filtered_dataset_point_selection(void);
static void test_read_filtered_dataset_interleaved_read(void);
+static void test_read_transformed_filtered_dataset_no_overlap(void);
static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void);
static void test_read_3d_filtered_dataset_no_overlap_same_pages(void);
static void test_read_3d_filtered_dataset_overlap(void);
@@ -120,6 +122,7 @@ static void (*tests[])(void) = {
test_write_filtered_dataset_all_no_selection,
test_write_filtered_dataset_point_selection,
test_write_filtered_dataset_interleaved_write,
+ test_write_transformed_filtered_dataset_no_overlap,
test_write_3d_filtered_dataset_no_overlap_separate_pages,
test_write_3d_filtered_dataset_no_overlap_same_pages,
test_write_3d_filtered_dataset_overlap,
@@ -135,6 +138,7 @@ static void (*tests[])(void) = {
test_read_filtered_dataset_all_no_selection,
test_read_filtered_dataset_point_selection,
test_read_filtered_dataset_interleaved_read,
+ test_read_transformed_filtered_dataset_no_overlap,
test_read_3d_filtered_dataset_no_overlap_separate_pages,
test_read_3d_filtered_dataset_no_overlap_same_pages,
test_read_3d_filtered_dataset_overlap,
@@ -428,7 +432,7 @@ test_write_filtered_dataset_no_overlap(void)
/* Select hyperslab in the file */
filespace = H5Dget_space(dset_id);
- VRFY((dset_id >= 0), "File dataspace retrieval succeeded");
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
@@ -1288,6 +1292,177 @@ test_write_filtered_dataset_interleaved_write(void)
}
/*
+ * Tests parallel write of transformed and filtered data
+ * in the case where only one process is writing to a
+ * particular chunk in the operation. Normally, a data
+ * transform function will cause the parallel library to
+ * break to independent I/O and this isn't allowed when
+ * there are filters in the pipeline. However, in this
+ * case the parallel library recognizes that the used
+ * data transform function "x" is the same as not applying
+ * the transform function. Therefore it does not apply
+ * the transform function resulting in not breaking to
+ * independent I/O.
+ *
+ * Programmer: Jan-Willem Blokland
+ * 08/20/2021
+ */
+static void
+test_write_transformed_filtered_dataset_no_overlap(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared transformed and filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS /
+ (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+
+ /* Create property list for collective dataset write and data transform */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data)
+ HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
* Tests parallel write of filtered data in the case where
* the dataset has 3 dimensions and each process writes
* to its own "page" in the 3rd dimension.
@@ -1762,8 +1937,8 @@ test_write_3d_filtered_dataset_overlap(void)
WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
(hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the amount that gets added when a rank moves down to its next section vertically in the
- dataset */
+ /* Add the amount that gets added when a rank moves down to its next
+ section vertically in the dataset */
+ ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
(i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
@@ -4224,6 +4399,232 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
}
/*
+ * Tests parallel read of transformed and filtered data in the
+ * case where only one process is reading from a particular
+ * chunk in the operation. Normally, a data transform function
+ * will cause the parallel library to break to independent I/O
+ * and this isn't allowed when there are filters in the pipeline.
+ * However, in this case the parallel library recognizes that
+ * the used data transform function "x" is the same as not
+ * applying the transform function. Therefore it does not apply
+ * the transform function resulting in not breaking to
+ * independent I/O.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
+ *
+ * Programmer: Jan-Willem Blokland
+ * 08/20/2021
+ */
+static void
+test_read_transformed_filtered_dataset_no_overlap(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int * recvcounts = NULL;
+ int * displs = NULL;
+
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared transformed and filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS *
+ (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace =
+ H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY(
+ (H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and reads
+ * it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS /
+ (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read and data transform */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)flat_dims[0];
+
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0]);
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
* Tests parallel read of filtered data in the case where
* the dataset has 3 dimensions and each process reads from
* each "page" in the 3rd dimension. However, no chunk on a
@@ -4497,8 +4898,8 @@ test_read_3d_filtered_dataset_overlap(void)
READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
(hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the amount that gets added when a rank moves down to its next section vertically in the
- dataset */
+ /* Add the amount that gets added when a rank moves down to its next
+ section vertically in the dataset */
+ ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
(i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
@@ -6093,7 +6494,7 @@ main(int argc, char **argv)
if (VERBOSE_MED)
h5_show_hostname();
- ALARM_ON;
+ TestAlarmOn();
/* Create test file */
fapl = H5Pcreate(H5P_FILE_ACCESS);
@@ -6172,7 +6573,7 @@ exit:
if (MAINPROCESS)
HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : "");
- ALARM_OFF;
+ TestAlarmOff();
h5_clean_files(FILENAME, fapl);
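
The two new transformed-and-filtered tests rely on the library recognizing "x" as the identity transform, so the filtered collective write is not forced down the (unsupported) independent path. A minimal sketch of the transfer-property setup they use, assuming the dataset, dataspaces and buffer already exist (all identifiers below are illustrative, not copied from the test):

    #include "hdf5.h"

    /* Collective write of a filtered dataset with an identity data transform */
    static herr_t
    write_with_identity_transform(hid_t dset_id, hid_t mem_space, hid_t file_space, const int *buf)
    {
        hid_t  dxpl_id = H5I_INVALID_HID;
        herr_t status  = -1;

        if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
            return -1;

        /* Filtered parallel I/O requires collective transfers */
        if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
            goto done;

        /* "x" leaves the data unchanged, so the library skips the transform step */
        if (H5Pset_data_transform(dxpl_id, "x") < 0)
            goto done;

        status = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space, file_space, dxpl_id, buf);

    done:
        H5Pclose(dxpl_id);
        return status;
    }
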
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index 3804c09..7eb34ed 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -138,6 +138,16 @@ typedef struct {
#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS \
(INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+/* Defines for the unshared transformed and filtered chunks write test */
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME "unshared_transformed_filtered_chunks_write"
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS \
+ (WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS \
+ (WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS / mpi_size)
+
/* Defines for the 3D unshared filtered dataset separate page write test */
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \
"3D_unshared_filtered_chunks_separate_pages_write"
@@ -280,6 +290,16 @@ typedef struct {
#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS \
(INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+/* Defines for the unshared transformed and filtered chunks read test */
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME "unshared_transformed_filtered_chunks_read"
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS \
+ (READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS \
+ (READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS / mpi_size)
+
/* Defines for the 3D unshared filtered dataset separate page read test */
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \
"3D_unshared_filtered_chunks_separate_pages_read"
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 2eca297..3041e77 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -129,12 +129,6 @@ zero_dim_dset(void)
/*
* Example of using PHDF5 to create ndatasets datasets. Each process write
* a slab of array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_dset_write(void)
@@ -218,12 +212,6 @@ multiple_dset_write(void)
}
/* Example of using PHDF5 to create, write, and read compact dataset.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
compact_dataset(void)
@@ -353,14 +341,6 @@ compact_dataset(void)
/*
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/24/04
*/
void
null_dataset(void)
@@ -465,14 +445,6 @@ null_dataset(void)
* Actual data is _not_ written to these datasets. Dataspaces are exact
* sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
void
big_dataset(void)
@@ -594,16 +566,6 @@ big_dataset(void)
/* Example of using PHDF5 to read a partial written dataset. The dataset does
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * Also added code to free dynamically allocated buffers.
- *
- * JRM - 8/11/04
*/
void
dataset_fillvalue(void)
@@ -710,15 +672,16 @@ dataset_fillvalue(void)
for (i = 0; i < (int)dset_dims[0]; i++)
for (j = 0; j < (int)dset_dims[1]; j++)
for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
if (*trdata != 0)
if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,
- j, k, l, *trdata);
+ HDprintf(
+ "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ mpi_rank, i, j, k, l, *trdata);
if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
+ HDprintf("Rank %d: [more errors ...]\n", mpi_rank);
if (err_num) {
- HDprintf("%d errors found in check_value\n", err_num);
+ HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
nerrors++;
}
}
@@ -856,12 +819,6 @@ collective_group_write_independent_group_read(void)
/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
collective_group_write(void)
@@ -896,6 +853,7 @@ collective_group_write(void)
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate");
H5Pclose(plist);
/* decide the hyperslab according to process number. */
@@ -909,13 +867,13 @@ collective_group_write(void)
ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
VRFY((memspace >= 0), "memspace");
VRFY((filespace >= 0), "filespace");
- VRFY((ret1 >= 0), "mgroup memspace selection");
- VRFY((ret2 >= 0), "mgroup filespace selection");
+ VRFY((ret1 == 0), "mgroup memspace selection");
+ VRFY((ret2 == 0), "mgroup filespace selection");
dcpl = H5Pcreate(H5P_DATASET_CREATE);
ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
VRFY((dcpl >= 0), "dataset creation property");
- VRFY((ret1 >= 0), "set chunk for dataset creation property");
+ VRFY((ret1 == 0), "set chunk for dataset creation property");
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
@@ -932,10 +890,14 @@ collective_group_write(void)
for (j = 0; j < size; j++)
outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ VRFY((ret1 == 0), "H5Dwrite");
- H5Dclose(did);
- H5Gclose(gid);
+ ret1 = H5Dclose(did);
+ VRFY((ret1 == 0), "H5Dclose");
+
+ ret1 = H5Gclose(gid);
+ VRFY((ret1 == 0), "H5Gclose");
#ifdef BARRIER_CHECKS
if (!((m + 1) % 10)) {
@@ -948,7 +910,9 @@ collective_group_write(void)
H5Pclose(dcpl);
H5Sclose(filespace);
H5Sclose(memspace);
- H5Fclose(fid);
+
+ ret1 = H5Fclose(fid);
+ VRFY((ret1 == 0), "H5Fclose");
HDfree(outme);
}
@@ -964,6 +928,7 @@ independent_group_read(void)
const H5Ptest_param_t *pt;
char * filename;
int ngroups;
+ herr_t ret;
pt = GetTestParameters();
filename = pt->name;
@@ -975,6 +940,7 @@ independent_group_read(void)
H5Pset_all_coll_metadata_ops(plist, FALSE);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((fid > 0), "H5Fopen");
H5Pclose(plist);
/* open groups and read datasets. Odd number processes read even number
@@ -989,20 +955,11 @@ independent_group_read(void)
group_dataset_read(fid, mpi_rank, m);
}
- H5Fclose(fid);
+ ret = H5Fclose(fid);
+ VRFY((ret == 0), "H5Fclose");
}
/* Open and read datasets and compare data
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memeory
- * when we are done with it.
- *
- * JRM - 8/16/04
*/
static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
@@ -1035,16 +992,17 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
/* this is the original value */
for (i = 0; i < size; i++)
- for (j = 0; j < size; j++) {
+ for (j = 0; j < size; j++)
outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- }
/* compare the original value(outdata) to the value in file(indata).*/
ret = check_value(indata, outdata, size);
VRFY((ret == 0), "check the data");
- H5Dclose(did);
- H5Gclose(gid);
+ ret = H5Dclose(did);
+ VRFY((ret == 0), "H5Dclose");
+ ret = H5Gclose(gid);
+ VRFY((ret == 0), "H5Gclose");
HDfree(indata);
HDfree(outdata);
@@ -1076,11 +1034,6 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
* + means the group has attribute(s).
* ' means the datasets in the groups have attribute(s).
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
multiple_group_write(void)
@@ -1164,12 +1117,6 @@ multiple_group_write(void)
/*
* In a group, creates NDATASETS datasets. Each process writes a hyperslab
* of a data array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1243,12 +1190,6 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
/*
* This function is to verify the data from multiple group testing. It opens
* every dataset in every group and check their correctness.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_group_read(void)
@@ -1323,12 +1264,6 @@ multiple_group_read(void)
/*
* This function opens all the datasets in a certain, checks the data using
* dataset_vrfy function.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1475,12 +1410,6 @@ read_attribute(hid_t obj_id, int this_type, int num)
/* This functions compares the original data with the read-in data for its
* hyperslab part only by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
@@ -1513,12 +1442,6 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
}
/* Decide the portion of data chunk in dataset by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
static void
@@ -1560,8 +1483,6 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t
* This function reproduces this situation. At present the test hangs
* on failure.
* JRM - 9/13/04
- *
- * Changes: None.
*/
#define N 4
@@ -1805,10 +1726,6 @@ io_mode_confusion(void)
* cache clients will have to construct on disk images on demand.
*
* JRM -- 10/13/10
- *
- * Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
*/
#define NUM_DATA_SETS 4
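
Besides dropping the dated change-log comments, the t_mdset.c hunks add VRFY() checks on return values that were previously ignored (H5Fcreate, H5Dwrite, H5Dclose, H5Gclose, H5Fclose) and prefix verification messages with the reporting rank. A compact sketch of that check-everything style, using a hypothetical CHECK macro in place of the harness's VRFY():

    #include "hdf5.h"
    #include <stdio.h>

    static int my_rank = 0; /* stand-in for the test's mpi_rank */
    static int nerrors = 0;

    /* Hypothetical stand-in for VRFY(): report and count, but keep going */
    #define CHECK(expr, msg)                                                \
        do {                                                                \
            if (!(expr)) {                                                  \
                fprintf(stderr, "Rank %d: %s failed\n", my_rank, msg);      \
                nerrors++;                                                  \
            }                                                               \
        } while (0)

    static void
    close_all_example(hid_t did, hid_t gid, hid_t fid)
    {
        CHECK(H5Dclose(did) >= 0, "H5Dclose");
        CHECK(H5Gclose(gid) >= 0, "H5Gclose");
        CHECK(H5Fclose(fid) >= 0, "H5Fclose");
    }
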
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index a883f55..a8a756c 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -1086,7 +1086,7 @@ main(int argc, char **argv)
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* set alarm. */
- ALARM_ON;
+ TestAlarmOn();
/*=======================================
* MPIO 1 write Many read test
@@ -1195,7 +1195,7 @@ finish:
}
/* turn off alarm */
- ALARM_OFF;
+ TestAlarmOff();
h5_clean_files(FILENAME, fapl);
H5close();
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 2a80f4a..cc569f6 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -133,7 +133,7 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part1)");
/* Don't run using the split VFD */
- envval = HDgetenv("HDF5_DRIVER");
+ envval = HDgetenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
@@ -207,5 +207,7 @@ error:
HDfflush(stderr);
HDprintf("*** ERROR ***\n");
HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
+ HDfflush(stdout);
+
HD_exit(EXIT_FAILURE);
} /* end main() */
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index c96233a..e782f8a 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -157,7 +157,7 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part2 with flush)");
/* Don't run using the split VFD */
- envval = HDgetenv("HDF5_DRIVER");
+ envval = HDgetenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
@@ -167,7 +167,7 @@ main(int argc, char *argv[])
HDputs(" Test not compatible with current Virtual File Driver");
}
MPI_Finalize();
- HDexit(EXIT_FAILURE);
+ HDexit(EXIT_SUCCESS);
}
if ((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
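
Both flush tests now read the driver name through the shared HDF5_DRIVER macro rather than a repeated string literal, and t_pflush2 exits with EXIT_SUCCESS when it skips an incompatible VFD, so a skip no longer counts as a failure. A sketch of that skip logic, assuming the macro simply names the HDF5_DRIVER environment variable (the check below is simplified to the split driver only):

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Assumption: in the test harness this macro lives in a shared header */
    #define HDF5_DRIVER "HDF5_DRIVER"

    int
    main(int argc, char *argv[])
    {
        const char *envval;

        MPI_Init(&argc, &argv);

        envval = getenv(HDF5_DRIVER);
        if (envval && strcmp(envval, "split") == 0) {
            puts("    Test not compatible with current Virtual File Driver");
            MPI_Finalize();
            exit(EXIT_SUCCESS); /* a skipped test must not be reported as a failure */
        }

        /* ... the real flush checks would run here ... */

        MPI_Finalize();
        return EXIT_SUCCESS;
    }
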
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 92c8bc0..a3f1859 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -98,10 +98,10 @@ test_plist_ed(void)
int mpi_size, mpi_rank, recv_proc;
hsize_t chunk_size = 16384; /* chunk size */
- double fill = 2.7f; /* Fill value */
+ double fill = 2.7; /* Fill value */
size_t nslots = 521 * 2;
size_t nbytes = 1048576 * 10;
- double w0 = 0.5f;
+ double w0 = 0.5;
unsigned max_compact;
unsigned min_dense;
hsize_t max_size[1]; /*data space maximum size */
@@ -114,26 +114,26 @@ test_plist_ed(void)
TRUE,
FALSE,
(2 * 2048 * 1024),
- 0.3f,
+ 0.3,
(64 * 1024 * 1024),
(4 * 1024 * 1024),
60000,
H5C_incr__threshold,
- 0.8f,
- 3.0f,
+ 0.8,
+ 3.0,
TRUE,
(8 * 1024 * 1024),
H5C_flash_incr__add_space,
- 2.0f,
- 0.25f,
+ 2.0,
+ 0.25,
H5C_decr__age_out_with_threshold,
- 0.997f,
- 0.8f,
+ 0.997,
+ 0.8,
TRUE,
(3 * 1024 * 1024),
3,
FALSE,
- 0.2f,
+ 0.2,
(256 * 2048),
H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
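
The t_prop.c hunks drop the f suffix from literals that initialize double members of the cache configuration struct: a float literal is rounded to single precision before being widened back to double, so the stored value silently differs from the written constant. A small standalone illustration (not part of the test):

    #include <stdio.h>

    int
    main(void)
    {
        double from_float  = 0.997f; /* rounded to float first: ~0.99699997 */
        double from_double = 0.997;  /* kept at double precision */

        printf("float literal : %.17g\n", from_float);
        printf("double literal: %.17g\n", from_double);
        return 0;
    }
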
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 7c2763c..518741d 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -35,6 +35,8 @@
#include "H5private.h"
#include "testphdf5.h"
+#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
+
static void coll_write_test(int chunk_factor);
static void coll_read_test(void);
@@ -1431,8 +1433,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
*-------------------------------------------------------------------------
*/
-#define LDSCT_DS_RANK 5
-#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
+#define LDSCT_DS_RANK 5
static void
lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c
index 862107b..501b8b8 100644
--- a/testpar/t_subfiling_vfd.c
+++ b/testpar/t_subfiling_vfd.c
@@ -2633,6 +2633,8 @@ error:
* 2019
* ---------------------------------------------------------------------------
*/
+extern hbool_t H5_use_selection_io_g;
+
int
main(int argc, char **argv)
{
@@ -2652,6 +2654,8 @@ main(int argc, char **argv)
h5_reset();
+ H5_use_selection_io_g = TRUE;
+
g_log_stream = stdout; /* default debug/logging output stream */
HDprintf("Testing Subfiling VFD functionality.\n");
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index f5b9e63..1ead1b8 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -33,9 +33,6 @@ int ngroups = 512; /* number of groups to create in root
int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void * old_client_data; /* previous error handler arg.*/
-
/* other option flags */
/* FILENAME and filenames must have the same number of names.
@@ -353,6 +350,8 @@ main(int argc, char **argv)
AddTest("props", test_file_properties, NULL, "Coll Metadata file property settings", PARATESTFILE);
+ AddTest("delete", test_delete, NULL, "MPI-IO VFD file delete", PARATESTFILE);
+
AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE);
AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index bd8de08..10e3027 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -225,19 +225,18 @@ typedef enum {
} ShapeSameTestMethods;
/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void * old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
-extern int dxfer_coll_type;
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern int facc_type; /*Test file access type */
+extern int dxfer_coll_type;
/* Test program prototypes */
void test_plist_ed(void);
void external_links(void);
void zero_dim_dset(void);
void test_file_properties(void);
+void test_delete(void);
void multiple_dset_write(void);
void multiple_group_write(void);
void multiple_group_read(void);