From 6ceb9711b8f891229cf4206417a824ffb7850c3d Mon Sep 17 00:00:00 2001
From: Mohamad Chaarawi
Date: Mon, 6 Jan 2014 12:56:45 -0500
Subject: [svn-r24612] fix bugs in parallel tests exposed in corner cases when
 running with 1 or 2 processes.

The first bug is in testpar/t_mdset.c, where the test reports an error in
addition to skipping the test if there are fewer than three procs. The fix is
to just skip the test.

The second bug is in testpar/t_dset.c, in the actual_io_mode tests, where an
incorrect expected value for the I/O mode was set if the number of procs
running the test is 1.

Tested with h5committest.
---
 testpar/t_dset.c    | 31 +++++++++++++++----------------
 testpar/t_mdset.c   |  9 +++------
 testpar/t_prop.c    | 38 +++++++++++++++++++++++---------------
 testpar/testphdf5.c | 18 ++++++++++++------
 4 files changed, 53 insertions(+), 43 deletions(-)

diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 34ccab7..246d2d9 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2703,7 +2703,10 @@ test_actual_io_mode(int selection_mode) {
             test_name = "Multi Chunk - Collective";
             actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
-            actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+            if(mpi_size > 1)
+                actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+            else
+                actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
             break;
 
         /* Mixed I/O with optimization */
@@ -2780,11 +2783,14 @@ test_actual_io_mode(int selection_mode) {
             test_name = "Multi Chunk - Mixed (Disagreement)";
             actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
-
-            if(mpi_rank == 0)
-                actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+            if(mpi_size > 1) {
+                if(mpi_rank == 0)
+                    actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+                else
+                    actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+            }
             else
-                actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+                actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
             break;
 
@@ -2843,7 +2849,6 @@ test_actual_io_mode(int selection_mode) {
     ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
     VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
 
-
     /* Get the number of elements in the selection */
     length = dim0 * dim1;
@@ -2921,7 +2926,6 @@ test_actual_io_mode(int selection_mode) {
     VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
         "reading and writing are the same for actual_chunk_opt_mode");
 
-
     /* Test values */
     if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) {
         sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
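The expected values adjusted in the hunks above follow from how the multi-chunk
optimization behaves on a single rank: with only one MPI process there is no
inter-process coordination, so the library reports per-chunk independent I/O even
though collective transfer was requested. The following is a minimal sketch of that
kind of check, not part of the patch; the helper name check_multi_chunk_io_mode is
hypothetical, and the dataset, selections, and buffer are assumed to be set up by
the caller in a file opened on MPI_COMM_WORLD.

#include <stdio.h>
#include "hdf5.h"   /* requires a parallel HDF5 build */

/* Sketch (not part of the patch): do a collective write through the
 * multi-chunk path and compare the actual I/O mode reported by the
 * library with the value expected for the given communicator size. */
static int
check_multi_chunk_io_mode(hid_t dset, hid_t mem_space, hid_t file_space,
                          const int *buf, int mpi_size)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5D_mpio_actual_io_mode_t actual, expected;

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);               /* request collective transfer */
    H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO); /* force the multi-chunk path */

    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    /* With a single process the multi-chunk path degenerates to independent I/O. */
    expected = (mpi_size > 1) ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CHUNK_INDEPENDENT;

    H5Pget_mpio_actual_io_mode(dxpl, &actual);
    if(actual != expected)
        fprintf(stderr, "unexpected actual I/O mode: %d\n", (int)actual);

    H5Pclose(dxpl);
    return (actual == expected) ? 0 : -1;
}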
@@ -3012,7 +3016,7 @@ actual_io_mode_tests(void) {
      */
     test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
     test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
-    
+
     /* The Multi Chunk Mixed test requires atleast three processes. */
     if (mpi_size > 2)
         test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
@@ -3110,8 +3114,8 @@ test_no_collective_cause_mode(int selection_mode)
     int length;
     int * buffer;
     int i;
-    MPI_Comm mpi_comm = MPI_COMM_NULL;
-    MPI_Info mpi_info = MPI_INFO_NULL;
+    MPI_Comm mpi_comm;
+    MPI_Info mpi_info;
     hid_t fid = -1;
     hid_t sid = -1;
     hid_t dataset = -1;
@@ -3138,7 +3142,7 @@
     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
     MPI_Barrier(MPI_COMM_WORLD);
-    
+
     HDassert(mpi_size >= 1);
 
     mpi_comm = MPI_COMM_WORLD;
@@ -3675,11 +3679,6 @@ test_no_collective_cause_mode_filter(int selection_mode)
 void
 no_collective_cause_tests(void)
 {
-    int mpi_size = -1;
-    int mpi_rank = -1;
-    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-    MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank);
-
     /*
      * Test individual cause
      */
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 8fc739e..516cc2f 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -1779,12 +1779,9 @@ void rr_obj_hdr_flush_confusion(void)
      */
     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
-    if (mpi_size < 3){
-        HDfprintf(stdout, "%s needs at least 3 processes to run. Terminated.\n",
-                  fcn_name);
-        nerrors++;
-        return;
-    }
+
+    HDassert(mpi_size > 2);
+
     is_reader = mpi_rank % 2;
     mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
     VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 4601316..e85b227 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -26,42 +26,50 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
     MPI_Request req[2];
     MPI_Status status;
     hid_t pl;            /* Decoded property list */
-    void *buf = NULL;
-    size_t buf_size = 0;
+    void *send_buf = NULL;
+    size_t send_size = 0;
     herr_t ret;          /* Generic return value */
 
     if(mpi_rank == 0) {
+
         /* first call to encode returns only the size of the buffer needed */
-        ret = H5Pencode(orig_pl, NULL, &buf_size);
+        ret = H5Pencode(orig_pl, NULL, &send_size);
         VRFY((ret >= 0), "H5Pencode succeeded");
 
-        buf = (uint8_t *)HDmalloc(buf_size);
+        send_buf = (uint8_t *)HDmalloc(send_size);
 
-        ret = H5Pencode(orig_pl, buf, &buf_size);
+        ret = H5Pencode(orig_pl, send_buf, &send_size);
         VRFY((ret >= 0), "H5Pencode succeeded");
 
-        MPI_Isend(&buf_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
-        MPI_Isend(buf, (int)buf_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
+        MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
+        MPI_Isend(send_buf, (int)send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
     } /* end if */
+
     if(mpi_rank == recv_proc) {
-        MPI_Recv(&buf_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
-        buf = (uint8_t *)HDmalloc(buf_size);
-        MPI_Recv(buf, (int)buf_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
+        void *recv_buf = NULL;
+        size_t recv_size = 0;
+
+        MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
+        recv_buf = (uint8_t *)HDmalloc(recv_size);
+        MPI_Recv(recv_buf, (int)recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
 
-        pl = H5Pdecode(buf);
+        pl = H5Pdecode(recv_buf);
         VRFY((pl >= 0), "H5Pdecode succeeded");
         VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");
 
         ret = H5Pclose(pl);
         VRFY((ret >= 0), "H5Pclose succeeded");
+
+        if(NULL != recv_buf)
+            HDfree(recv_buf);
     } /* end if */
 
-    if(0 == mpi_rank)
+    if(mpi_rank == 0) {
         MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
-
-    if(NULL != buf)
-        HDfree(buf);
+        if(NULL != send_buf)
+            HDfree(send_buf);
+    }
 
     MPI_Barrier(MPI_COMM_WORLD);
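The t_prop.c change above keeps the sender's and receiver's buffers separate but
still relies on the usual two-call H5Pencode pattern: the first call with a NULL
buffer only reports the required size, the second call fills the allocated buffer.
A stripped-down, serial sketch of that round trip (no MPI exchange, hypothetical
names, plain malloc/free instead of the HDmalloc/HDfree wrappers) might look like
this:

#include <stdlib.h>
#include "hdf5.h"

/* Sketch (not part of the patch): encode a property list into a byte
 * buffer and decode it back, verifying the copy equals the original. */
static int
roundtrip_plist(hid_t plist)
{
    size_t size = 0;
    void  *buf  = NULL;
    hid_t  copy = -1;
    int    equal;

    /* First call with a NULL buffer only reports the encoded size. */
    if(H5Pencode(plist, NULL, &size) < 0)
        return -1;

    buf = malloc(size);
    if(H5Pencode(plist, buf, &size) < 0) {  /* second call fills the buffer */
        free(buf);
        return -1;
    }

    copy  = H5Pdecode(buf);                 /* rebuild a property list from the bytes */
    equal = (copy >= 0) && (H5Pequal(plist, copy) > 0);

    if(copy >= 0)
        H5Pclose(copy);
    free(buf);
    return equal ? 0 : -1;
}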
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 89230f1..3419977 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -342,7 +342,7 @@ int main(int argc, char **argv)
      * calls. By then, MPI calls may not work.
      */
     if (H5dont_atexit() < 0){
-        printf("Failed to turn off atexit processing. Continue.\n", mpi_rank);
+        printf("Failed to turn off atexit processing. Continue.\n");
     };
     H5open();
     h5_show_hostname();
@@ -486,11 +486,17 @@ int main(int argc, char **argv)
             "I/O mode confusion test -- hangs quickly on failure",
             &io_mode_confusion_params);
 
-    rr_obj_flush_confusion_params.name = PARATESTFILE;
-    rr_obj_flush_confusion_params.count = 0; /* value not used */
-    AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
-            "round robin object header flush confusion test",
-            &rr_obj_flush_confusion_params);
+    if((mpi_size < 3) && MAINPROCESS) {
+        printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
+        printf("rr_obj_hdr_flush_confusion test will be skipped \n");
+    }
+    if(mpi_size > 2) {
+        rr_obj_flush_confusion_params.name = PARATESTFILE;
+        rr_obj_flush_confusion_params.count = 0; /* value not used */
+        AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
+                "round robin object header flush confusion test",
+                &rr_obj_flush_confusion_params);
+    }
 
     AddTest("tldsc", lower_dim_size_comp_test, NULL,
--
cgit v0.12
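The testphdf5.c hunk above shows the pattern this commit settles on for
size-dependent tests: instead of counting a too-small run as an error, the test is
only registered (and the skip announced once) when enough ranks are available,
while the test body itself merely asserts its precondition. A minimal sketch of
that guard, with a hypothetical registration hook and not taken from the HDF5
sources:

#include <stdio.h>
#include <mpi.h>

/* Sketch (not part of the patch): register a test that needs at least
 * three ranks only when the communicator is large enough; otherwise
 * print a skip notice on rank 0 and report no error. */
static void
maybe_register_three_rank_test(void (*register_test)(void))
{
    int mpi_size = 0;
    int mpi_rank = 0;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if(mpi_size < 3) {
        if(mpi_rank == 0)            /* only one rank prints the notice */
            printf("test needs at least 3 processes -- skipped\n");
        return;                      /* skipping is not an error */
    }

    register_test();
}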