summaryrefslogtreecommitdiffstats
path: root/testpar
diff options
context:
space:
mode:
authorjhendersonHDF <jhenderson@hdfgroup.org>2023-10-25 00:36:18 (GMT)
committerGitHub <noreply@github.com>2023-10-25 00:36:18 (GMT)
commita91be87f072a8020d9a467f1ef81132cb5c40149 (patch)
treee5a3ef49a818d07f5649f9aeaa718047efaef658 /testpar
parent097fd51481a7d5eaf8f94436cf21e08ac516d6ba (diff)
downloadhdf5-a91be87f072a8020d9a467f1ef81132cb5c40149.zip
hdf5-a91be87f072a8020d9a467f1ef81132cb5c40149.tar.gz
hdf5-a91be87f072a8020d9a467f1ef81132cb5c40149.tar.bz2
Sync with develop (#3764)
* Add missing test files to distclean target (#3734) Cleans up new files in Autotools `make distclean` in the test directory * Add tools/libtest to Autotools builds (#3735) This was only added to CMake many years ago and tests the tools library. * Clean up onion VFD files in tools `make clean` (#3739) Cleans up h5dump and h5diff *.onion files in the Autotools when running `make clean`. * Clean Java test files on Autotools (#3740) Removes generated HDF5 and text output files when running `make clean`. * Clean the flushrefresh test dir on Autotools (#3741) The flushrefresh_test directory was not being cleaned up w/ `make clean` under the Autotools * Fix file names in tfile.c (#3743) Some tests in tfile.c use h5_fileaccess to get a VFD-dependent file name but use the scheme from testhdf5, reusing the FILE1 and FILE8 names. This leads to files like test1.h5.h5 which are unintended and not cleaned up. This changes the filename scheme for a few tests to work with h5test, resulting in more informative names and allowing the files to be cleaned up at the end of the test. The test files have also been added to the `make clean` target for the Autotools. * Clean Autotools files in parallel tests (#3744) Adds missing files to `make clean` for parallel, including Fortran. * Add native VOL checks to deprecated functions (#3647) * Add native VOL checks to deprecated functions * Remove unneeded native VOL checks * Move native checks to top level calls * Fix buffer overflow in cache debugging code (#3691) * update stat arg for apple (#3726) * update stat arg for apple * use H5_HAVE_DARWIN for Apple ifdef * fix typo * removed duplicate H5_ih_info_t * added fortran async test to cmake * Fix windows cpack error in WiX package. (#3747) * Add a simple cache to the ros3 VFD (#3753) Adds a small cache of the first N bytes of a file opened with the read-only S3 (ros3) VFD, where N is 4kiB or the size of the file, whichever is smaller. This avoids a lot of small I/O operations on file open. 
Addresses GitHub issue #3381 * Update Autotools to correctly configure oneAPI (#3751) * Update Autotools to correctly configure oneAPI Splits the Intel config files under the Autotools into 'classic' Intel and oneAPI versions, fixing 'unsupported option' messages. Also turns off `-check uninit` (new in 2023) in Fortran, which kills the H5_buildiface program due to false positives. * Enable Fortran in oneAPI CI workflow * Turn on Fortran in CMake, update LD_LIBRARY_PATH * Go back to disabling Fortran w/ Intel For some reason there's a linking problem w/ Fortran error while loading shared libraries: libifport.so.5: cannot open shared object file: No such file or directory * Add h5pget_actual_selection_io_mode fortran wrapper (#3746) * added h5pget_actual_selection_io_mode_f test * added tests for h5pget_actual_selection_io_mode_f * fixed int_f type conversion * Update fortran action step (#3748) * Added missing DLL for H5PGET_ACTUAL_SELECTION_IO_MODE_F (#3760) * add missing H5PGET_ACTUAL_SELECTION_IO_MODE_F dll * Bump the ros3 VFD cache to 16 MiB (#3759) * Fix hangs during collective I/O with independent metadata writes (#3693) * Fix some issues with collective metadata reads for chunked datasets (#3716) Add functions/callbacks for explicit control over chunk index open/close Add functions/callbacks to check if chunk index is open or not so that it can be opened if necessary before temporarily disabling collective metadata reads in the library Add functions/callbacks for requesting loading of additional chunk index metadata beyond the chunk index itself * Fix failure in t_select_io_dset when run with more than 10 ranks (#3758) * Fix H5Pset_evict_on_close failing regardless of actual parallel use (#3761) Allow H5Pset_evict_on_close to be called regardless of whether a parallel build of HDF5 is being used Fail during file opens if H5Pset_evict_on_close has been set to true on the given File Access Property List and the size of the MPI communicator being used is greater 
than 1
Diffstat (limited to 'testpar')
-rw-r--r--testpar/Makefile.am3
-rw-r--r--testpar/t_coll_md.c103
-rw-r--r--testpar/t_file.c59
-rw-r--r--testpar/t_select_io_dset.c30
-rw-r--r--testpar/testphdf5.c5
-rw-r--r--testpar/testphdf5.h2
6 files changed, 199 insertions, 3 deletions
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 59d47e1..4a8cb82 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -58,6 +58,7 @@ LDADD = $(LIBH5TEST) $(LIBHDF5)
# after_mpi_fin.h5 is from t_init_term
# go is used for debugging. See testphdf5.c.
CHECK_CLEANFILES+=MPItest.h5 Para*.h5 bigio_test.h5 CacheTestDummy.h5 \
- ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go
+ ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go noflush.h5 \
+ mpio_select_test_file.h5 *.btr
include $(top_srcdir)/config/conclude.am
diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c
index 1220111..9c6fc71 100644
--- a/testpar/t_coll_md.c
+++ b/testpar/t_coll_md.c
@@ -43,6 +43,11 @@
#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr"
#define COLL_GHEAP_WRITE_ATTR_DIMS 1
+#define COLL_IO_IND_MD_WRITE_NDIMS 2
+#define COLL_IO_IND_MD_WRITE_CHUNK0 4
+#define COLL_IO_IND_MD_WRITE_CHUNK1 256
+#define COLL_IO_IND_MD_WRITE_NCHUNK1 16384
+
/*
* A test for issue HDFFV-10501. A parallel hang was reported which occurred
* in linked-chunk I/O when collective metadata reads are enabled and some ranks
@@ -569,3 +574,101 @@ test_collective_global_heap_write(void)
VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
}
+
+/*
+ * A test to ensure that hangs don't occur when collective I/O
+ * is requested at the interface level (by a call to
+ * H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)), while
+ * collective metadata writes are NOT requested.
+ */
+void
+test_coll_io_ind_md_write(void)
+{
+ const char *filename;
+ long long *data = NULL;
+ hsize_t dset_dims[COLL_IO_IND_MD_WRITE_NDIMS];
+ hsize_t chunk_dims[COLL_IO_IND_MD_WRITE_NDIMS];
+ hsize_t sel_dims[COLL_IO_IND_MD_WRITE_NDIMS];
+ hsize_t offset[COLL_IO_IND_MD_WRITE_NDIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_id2 = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, false) >= 0), "Unset collective metadata reads succeeded");
+ VRFY((H5Pset_coll_metadata_write(fapl_id, false) >= 0), "Unset collective metadata writes succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dset_dims[0] = (hsize_t)(mpi_size * COLL_IO_IND_MD_WRITE_CHUNK0);
+ dset_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1);
+
+ fspace_id = H5Screate_simple(COLL_IO_IND_MD_WRITE_NDIMS, dset_dims, NULL);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ chunk_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0);
+ chunk_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1);
+
+ VRFY((H5Pset_chunk(dcpl_id, COLL_IO_IND_MD_WRITE_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ VRFY((H5Pset_shuffle(dcpl_id) >= 0), "H5Pset_shuffle succeeded");
+
+ dset_id = H5Dcreate2(file_id, "dset1", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ sel_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0);
+ sel_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1);
+
+ offset[0] = (hsize_t)mpi_rank * sel_dims[0];
+ offset[1] = 0;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, sel_dims, NULL) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ data = malloc(sel_dims[0] * sel_dims[1] * sizeof(long long));
+ for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++)
+ data[i] = rand();
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0),
+ "H5Dwrite succeeded");
+
+ dset_id2 = H5Dcreate2(file_id, "dset2", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id2 >= 0), "H5Dcreate2 succeeded");
+
+ for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++)
+ data[i] = rand();
+
+ VRFY((H5Dwrite(dset_id2, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0),
+ "H5Dwrite succeeded");
+
+ free(data);
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Dclose(dset_id2) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
diff --git a/testpar/t_file.c b/testpar/t_file.c
index a6a541b..700ccc2 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -1060,3 +1060,62 @@ test_invalid_libver_bounds_file_close_assert(void)
ret = H5Pclose(fcpl_id);
VRFY((SUCCEED == ret), "H5Pclose");
}
+
+/*
+ * Tests that H5Pevict_on_close properly succeeds in serial/one rank and fails when
+ * called by multiple ranks.
+ */
+void
+test_evict_on_close_parallel_unsupp(void)
+{
+ const char *filename = NULL;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ herr_t ret;
+
+ filename = (const char *)GetTestParameters();
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_libver_bounds(fapl_id, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18);
+ VRFY((SUCCEED == ret), "H5Pset_libver_bounds");
+
+ ret = H5Pset_evict_on_close(fapl_id, true);
+ VRFY((SUCCEED == ret), "H5Pset_evict_on_close");
+
+ /* test on 1 rank */
+ ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, info);
+ VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
+
+ if (mpi_rank == 0) {
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((SUCCEED == ret), "H5Fcreate");
+ ret = H5Fclose(fid);
+ VRFY((SUCCEED == ret), "H5Fclose");
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Barrier(MPI_COMM_WORLD)), "MPI_Barrier");
+
+ /* test on multiple ranks if we have them */
+ if (mpi_size > 1) {
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
+
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ }
+ H5E_END_TRY
+ VRFY((fid == H5I_INVALID_HID), "H5Fcreate");
+ }
+
+ ret = H5Pclose(fapl_id);
+ VRFY((SUCCEED == ret), "H5Pclose");
+}
diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c
index 2be2b40..9d3f120 100644
--- a/testpar/t_select_io_dset.c
+++ b/testpar/t_select_io_dset.c
@@ -222,6 +222,26 @@ check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected)
}
/*
+ * Helper routine to check actual selection I/O mode on a dxpl
+ */
+static void
+check_actual_selection_io_mode_either(hid_t dxpl, uint32_t sel_io_mode_expected1,
+ uint32_t sel_io_mode_expected2)
+{
+ uint32_t actual_sel_io_mode;
+
+ if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0)
+ P_TEST_ERROR;
+ if (actual_sel_io_mode != sel_io_mode_expected1 && actual_sel_io_mode != sel_io_mode_expected2) {
+ if (MAINPROCESS)
+ printf("\n Failed: Incorrect selection I/O mode (expected/actual) %u or %u : %u",
+ (unsigned)sel_io_mode_expected1, (unsigned)sel_io_mode_expected2,
+ (unsigned)actual_sel_io_mode);
+ P_TEST_ERROR;
+ }
+}
+
+/*
* Case 1: single dataset read/write, no type conversion (null case)
*/
static void
@@ -327,8 +347,14 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select,
exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE;
testing_check_io_mode(dxpl, exp_io_mode);
- if (chunked && !dtrans)
- check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO);
+ if (chunked && !dtrans) {
+ /* If there are more ranks than chunks, then some ranks will not perform vector I/O due to how the
+ * parallel compression code redistributes data */
+ if ((hsize_t)mpi_size > (dims[0] / cdims[0]))
+ check_actual_selection_io_mode_either(dxpl, H5D_VECTOR_IO, 0);
+ else
+ check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO);
+ }
else
check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO);
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 584ca1f..2428c71 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -366,6 +366,9 @@ main(int argc, char **argv)
AddTest("invlibverassert", test_invalid_libver_bounds_file_close_assert, NULL,
"Invalid libver bounds assertion failure", PARATESTFILE);
+ AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, "Evict on close in parallel failure",
+ PARATESTFILE);
+
AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE);
AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE);
@@ -521,6 +524,8 @@ main(int argc, char **argv)
"Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL,
"Collective MD write of global heap data", PARATESTFILE);
+ AddTest("COLLIO_INDMDWR", test_coll_io_ind_md_write, NULL,
+ "Collective I/O with Independent metadata writes", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 6ac8080..6bbdb0d 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -233,6 +233,7 @@ void zero_dim_dset(void);
void test_file_properties(void);
void test_delete(void);
void test_invalid_libver_bounds_file_close_assert(void);
+void test_evict_on_close_parallel_unsupp(void);
void multiple_dset_write(void);
void multiple_group_write(void);
void multiple_group_read(void);
@@ -296,6 +297,7 @@ void test_partial_no_selection_coll_md_read(void);
void test_multi_chunk_io_addrmap_issue(void);
void test_link_chunk_io_sort_chunk_issue(void);
void test_collective_global_heap_write(void);
+void test_coll_io_ind_md_write(void);
void test_oflush(void);
/* commonly used prototypes */