From 13f1790851c91f1d9dbb267d03ecfc5897c73ed2 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Tue, 15 May 2018 14:12:32 -0500 Subject: Add seven of fourteen parallel filtered data partial read tests --- testpar/t_filters_parallel.c | 3034 ++++++++++++++++++++++++++++++++---------- testpar/t_filters_parallel.h | 240 ++-- 2 files changed, 2500 insertions(+), 774 deletions(-) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 21a5ce0..30c048d 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -33,20 +33,47 @@ int nerrors = 0; #define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) -static void test_one_chunk_filtered_dataset(void); -static void test_filtered_dataset_no_overlap(void); -static void test_filtered_dataset_overlap(void); -static void test_filtered_dataset_single_no_selection(void); -static void test_filtered_dataset_all_no_selection(void); -static void test_filtered_dataset_point_selection(void); -static void test_filtered_dataset_interleaved_write(void); -static void test_3d_filtered_dataset_no_overlap_separate_pages(void); -static void test_3d_filtered_dataset_no_overlap_same_pages(void); -static void test_3d_filtered_dataset_overlap(void); -static void test_cmpd_filtered_dataset_no_conversion_unshared(void); -static void test_cmpd_filtered_dataset_no_conversion_shared(void); -static void test_cmpd_filtered_dataset_type_conversion_unshared(void); -static void test_cmpd_filtered_dataset_type_conversion_shared(void); +/* Tests for writing data in parallel */ +static void test_write_one_chunk_filtered_dataset(void); +static void test_write_filtered_dataset_no_overlap(void); +static void test_write_filtered_dataset_overlap(void); +static void test_write_filtered_dataset_single_no_selection(void); +static void test_write_filtered_dataset_all_no_selection(void); +static void test_write_filtered_dataset_point_selection(void); +static void test_write_filtered_dataset_interleaved_write(void); +static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void); +static void test_write_3d_filtered_dataset_no_overlap_same_pages(void); +static void test_write_3d_filtered_dataset_overlap(void); +static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void); +static void test_write_cmpd_filtered_dataset_no_conversion_shared(void); +static void test_write_cmpd_filtered_dataset_type_conversion_unshared(void); +static void test_write_cmpd_filtered_dataset_type_conversion_shared(void); + +/* Tests for reading data in parallel */ +static void test_read_one_chunk_filtered_dataset(void); +static void test_read_filtered_dataset_no_overlap(void); +static void test_read_filtered_dataset_overlap(void); +static void test_read_filtered_dataset_single_no_selection(void); +static void test_read_filtered_dataset_all_no_selection(void); +static void test_read_filtered_dataset_point_selection(void); +static void test_read_filtered_dataset_interleaved_read(void); +static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void); +static void test_read_3d_filtered_dataset_no_overlap_same_pages(void); +static void test_read_3d_filtered_dataset_overlap(void); +static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void); +static void test_read_cmpd_filtered_dataset_no_conversion_shared(void); +static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void); +static void test_read_cmpd_filtered_dataset_type_conversion_shared(void); + +/* + * Tests for attempting to round-trip the data going from + * + * written 
serially -> read in parallel + * + * and + * + * written in parallel -> read serially + */ static void test_write_serial_read_parallel(void); static void test_write_parallel_read_serial(void); @@ -56,22 +83,36 @@ static int mpi_rank; static int mpi_size; static void (*tests[])(void) = { - test_one_chunk_filtered_dataset, - test_filtered_dataset_no_overlap, - test_filtered_dataset_overlap, - test_filtered_dataset_single_no_selection, - test_filtered_dataset_all_no_selection, - test_filtered_dataset_point_selection, - test_filtered_dataset_interleaved_write, - test_3d_filtered_dataset_no_overlap_separate_pages, - test_3d_filtered_dataset_no_overlap_same_pages, - test_3d_filtered_dataset_overlap, - test_cmpd_filtered_dataset_no_conversion_unshared, - test_cmpd_filtered_dataset_no_conversion_shared, - test_cmpd_filtered_dataset_type_conversion_unshared, - test_cmpd_filtered_dataset_type_conversion_shared, - test_write_serial_read_parallel, - test_write_parallel_read_serial, + test_write_one_chunk_filtered_dataset, + test_write_filtered_dataset_no_overlap, + test_write_filtered_dataset_overlap, + test_write_filtered_dataset_single_no_selection, + test_write_filtered_dataset_all_no_selection, + test_write_filtered_dataset_point_selection, + test_write_filtered_dataset_interleaved_write, + test_write_3d_filtered_dataset_no_overlap_separate_pages, + test_write_3d_filtered_dataset_no_overlap_same_pages, + test_write_3d_filtered_dataset_overlap, + test_write_cmpd_filtered_dataset_no_conversion_unshared, + test_write_cmpd_filtered_dataset_no_conversion_shared, + test_write_cmpd_filtered_dataset_type_conversion_unshared, + test_write_cmpd_filtered_dataset_type_conversion_shared, + test_read_one_chunk_filtered_dataset, + test_read_filtered_dataset_no_overlap, + test_read_filtered_dataset_overlap, + test_read_filtered_dataset_single_no_selection, + test_read_filtered_dataset_all_no_selection, + test_read_filtered_dataset_point_selection, + test_read_filtered_dataset_interleaved_read, + test_read_3d_filtered_dataset_no_overlap_separate_pages, + test_read_3d_filtered_dataset_no_overlap_same_pages, + test_read_3d_filtered_dataset_overlap, + test_read_cmpd_filtered_dataset_no_conversion_unshared, + test_read_cmpd_filtered_dataset_no_conversion_shared, + test_read_cmpd_filtered_dataset_type_conversion_unshared, + test_read_cmpd_filtered_dataset_type_conversion_shared, + test_write_serial_read_parallel, + test_write_parallel_read_serial, }; /* @@ -82,31 +123,32 @@ static void (*tests[])(void) = { * 02/01/2017 */ static void -test_one_chunk_filtered_dataset(void) +test_write_one_chunk_filtered_dataset(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t chunk_dims[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t sel_dims[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t count[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t stride[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t block[ONE_CHUNK_FILTERED_DATASET_DIMS]; - hsize_t offset[ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t start[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = 
-1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; - if (MAINPROCESS) puts("Testing one-chunk filtered dataset"); + if (MAINPROCESS) puts("Testing write to one-chunk filtered dataset"); /* Set up file access property list with parallel I/O access */ plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); - - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -114,29 +156,30 @@ test_one_chunk_filtered_dataset(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NROWS; - dataset_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS; - chunk_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS; - chunk_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; - sel_dims[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size; - sel_dims[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS; - - filespace = H5Screate_simple(ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + sel_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS; + + filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -148,26 +191,29 @@ test_one_chunk_filtered_dataset(void) */ count[0] = 1; count[1] = 1; - stride[0] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS; - stride[1] = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + stride[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; block[0] = sel_dims[0]; block[1] = sel_dims[1]; - offset[0] = ((hsize_t) mpi_rank * sel_dims[0]); - offset[1] = 0; + start[0] = ((hsize_t) mpi_rank * sel_dims[0]); + 
start[1] = 0; - if (VERBOSE_MED) - printf("Process %d: count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); /* Fill data buffer */ - data_size = (hsize_t) ONE_CHUNK_FILTERED_DATASET_CH_NROWS * (hsize_t) ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data); + data_size = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS + * (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); data = (C_DATATYPE *) calloc(1, data_size); @@ -180,16 +226,18 @@ test_one_chunk_filtered_dataset(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = ((C_DATATYPE) i % (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) - + ((C_DATATYPE) i / (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); + correct_buf[i] = ((C_DATATYPE) i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE) i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -199,12 +247,14 @@ test_one_chunk_filtered_dataset(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -228,18 +278,18 @@ test_one_chunk_filtered_dataset(void) * 02/01/2017 */ static void -test_filtered_dataset_no_overlap(void) 
+test_write_filtered_dataset_no_overlap(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t count[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t stride[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t block[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t offset[UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -250,9 +300,11 @@ test_filtered_dataset_no_overlap(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -260,29 +312,30 @@ test_filtered_dataset_no_overlap(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS; - chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS; - sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS; - sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS; - - filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add 
test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -293,23 +346,26 @@ test_filtered_dataset_no_overlap(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS; - stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS; - stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS; - block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS; - block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NCOLS; - offset[0] = ((hsize_t) mpi_rank * (hsize_t) UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); - offset[1] = 0; - - if (VERBOSE_MED) - printf("Process %d: count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((dset_id >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); @@ -325,16 +381,21 @@ test_filtered_dataset_no_overlap(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE) ( (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) - + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))); + correct_buf[i] = + (C_DATATYPE) ( + (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + ); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -344,12 +405,14 @@ test_filtered_dataset_no_overlap(void) read_buf = 
(C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -374,18 +437,18 @@ test_filtered_dataset_no_overlap(void) * 02/01/2017 */ static void -test_filtered_dataset_overlap(void) +test_write_filtered_dataset_overlap(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t count[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t stride[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t block[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t offset[SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -396,9 +459,11 @@ test_filtered_dataset_overlap(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -406,29 +471,30 @@ test_filtered_dataset_overlap(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_NCOLS; - chunk_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS; + dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR; - sel_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR; + sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) 
DIM1_SCALE_FACTOR; - filespace = H5Screate_simple(SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -438,24 +504,27 @@ test_filtered_dataset_overlap(void) /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ - count[0] = (hsize_t) SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS; - count[1] = (hsize_t) SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS; - stride[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS; - stride[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS; - block[0] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size; - block[1] = (hsize_t) SHARED_FILTERED_CHUNKS_CH_NCOLS; - offset[0] = (hsize_t) mpi_rank * block[0]; - offset[1] = 0; - - if (VERBOSE_MED) - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + count[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + count[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size; + block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t) mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); @@ -471,17 +540,21 @@ test_filtered_dataset_overlap(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / 
sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE) ((dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) - + (i % dataset_dims[1]) - + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + correct_buf[i] = (C_DATATYPE) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -491,12 +564,14 @@ test_filtered_dataset_overlap(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -523,18 +598,18 @@ test_filtered_dataset_overlap(void) * 02/01/2017 */ static void -test_filtered_dataset_single_no_selection(void) +test_write_filtered_dataset_single_no_selection(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t count[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t stride[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t block[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t offset[SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; size_t i, data_size, correct_buf_size; size_t segment_length; hid_t file_id = -1, dset_id = -1, plist_id = -1; @@ -546,9 +621,11 @@ test_filtered_dataset_single_no_selection(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set 
FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -556,32 +633,33 @@ test_filtered_dataset_single_no_selection(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; - chunk_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - sel_dims[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - sel_dims[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; - - if (mpi_rank == SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + dataset_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) sel_dims[0] = sel_dims[1] = 0; - filespace = H5Screate_simple(SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -592,26 +670,29 @@ test_filtered_dataset_single_no_selection(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - stride[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - stride[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - block[0] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - block[1] = (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - offset[0] = 
(hsize_t) mpi_rank * (hsize_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; - offset[1] = 0; - - if (VERBOSE_MED) - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + count[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t) mpi_rank * (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - if (mpi_rank == SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); else - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); @@ -627,20 +708,26 @@ test_filtered_dataset_single_no_selection(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE) ( (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) - + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))); + correct_buf[i] = + (C_DATATYPE) ( + (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + ); /* Compute the correct offset into the buffer for the process having no selection and clear it */ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size; - HDmemset(correct_buf + ((size_t) SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), 0, segment_length * sizeof(*data)); + HDmemset(correct_buf + ((size_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), + 0, segment_length * sizeof(*data)); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -650,12 +737,14 @@ test_filtered_dataset_single_no_selection(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); 
VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -683,14 +772,14 @@ test_filtered_dataset_single_no_selection(void) * 02/02/2017 */ static void -test_filtered_dataset_all_no_selection(void) +test_write_filtered_dataset_all_no_selection(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -701,9 +790,11 @@ test_filtered_dataset_all_no_selection(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -711,28 +802,29 @@ test_filtered_dataset_all_no_selection(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; - chunk_dims[0] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t) ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + dataset_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; sel_dims[0] = sel_dims[1] = 0; - filespace = H5Screate_simple(ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); VRFY((memspace 
>= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -761,9 +853,11 @@ test_filtered_dataset_all_no_selection(void) plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -773,12 +867,14 @@ test_filtered_dataset_all_no_selection(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -800,15 +896,15 @@ test_filtered_dataset_all_no_selection(void) * 02/02/2017 */ static void -test_filtered_dataset_point_selection(void) +test_write_filtered_dataset_point_selection(void) { C_DATATYPE *data = NULL; C_DATATYPE *correct_buf = NULL; C_DATATYPE *read_buf = NULL; hsize_t *coords = NULL; - hsize_t dataset_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; size_t i, j, data_size, correct_buf_size; size_t num_points; hid_t file_id = -1, dset_id = -1, plist_id = -1; @@ -820,9 +916,11 @@ test_filtered_dataset_point_selection(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) 
>= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -830,29 +928,30 @@ test_filtered_dataset_point_selection(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS; - chunk_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - sel_dims[0] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size; - sel_dims[1] = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS; - - filespace = H5Screate_simple(POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims,NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -863,16 +962,17 @@ test_filtered_dataset_point_selection(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - num_points = (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size; + num_points = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size; coords = (hsize_t *) calloc(1, 2 * num_points * sizeof(*coords)); VRFY((NULL != coords), "Coords calloc succeeded"); for (i = 0; i < num_points; i++) - for (j = 0; j < POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) - coords[(i * POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = (j > 0) ? 
(i % (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
-                                                              : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+        for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+            coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
+                (j > 0) ? (i % (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+                        : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
 
     VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
         "Point selection succeeded");
 
     /* Fill data buffer */
@@ -889,17 +989,21 @@ test_filtered_dataset_point_selection(void)
         data[i] = (C_DATATYPE) GEN_DATA(i);
 
     for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
-        correct_buf[i] = (C_DATATYPE) ((dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
-                       + (i % dataset_dims[1])
-                       + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+        correct_buf[i] = (C_DATATYPE) (
+                           (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+                         + (i % dataset_dims[1])
+                         + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+                         );
 
     /* Create property list for collective dataset write */
     plist_id = H5Pcreate(H5P_DATASET_XFER);
     VRFY((plist_id >= 0), "DXPL creation succeeded");
 
-    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+        "Set DXPL MPIO succeeded");
 
-    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+        "Dataset write succeeded");
 
     if (data) free(data);
 
@@ -909,12 +1013,14 @@ test_filtered_dataset_point_selection(void)
     read_buf = (C_DATATYPE *) calloc(1, correct_buf_size);
     VRFY((NULL != read_buf), "calloc succeeded");
 
-    dset_id = H5Dopen2(file_id, "/" POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+    dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
     VRFY((dset_id >= 0), "Dataset open succeeded");
 
-    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+        "Dataset read succeeded");
 
-    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+        "Data verification succeeded");
 
     if (coords) free(coords);
     if (correct_buf) free(correct_buf);
@@ -941,7 +1047,7 @@ test_filtered_dataset_point_selection(void)
  * 02/02/2017
  */
 static void
-test_filtered_dataset_interleaved_write(void)
+test_write_filtered_dataset_interleaved_write(void)
 {
     C_DATATYPE *data = NULL;
     C_DATATYPE *read_buf = NULL;
@@ -949,10 +1055,10 @@ test_filtered_dataset_interleaved_write(void)
     hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
     hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
     hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
-    hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+    hsize_t start[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
     hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+    hsize_t 
count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
     hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
-    hsize_t offset[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
     size_t i, data_size, correct_buf_size;
     hid_t file_id = -1, dset_id = -1, plist_id = -1;
     hid_t filespace = -1, memspace = -1;
@@ -963,9 +1069,11 @@ test_filtered_dataset_interleaved_write(void)
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
     VRFY((plist_id >= 0), "FAPL creation succeeded");

-    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+            "Set FAPL MPIO succeeded");

-    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+            "Set libver bounds succeeded");

     file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
     VRFY((file_id >= 0), "Test file open succeeded");
@@ -990,7 +1098,8 @@ test_filtered_dataset_interleaved_write(void)
     plist_id = H5Pcreate(H5P_DATASET_CREATE);
     VRFY((plist_id >= 0), "DCPL creation succeeded");

-    VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+    VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+            "Chunk size set");

     /* Add test filter to the pipeline */
     VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
@@ -1011,18 +1120,21 @@
     stride[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
     block[0] = 1;
     block[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
-    offset[0] = (hsize_t) mpi_rank;
-    offset[1] = 0;
+    start[0] = (hsize_t) mpi_rank;
+    start[1] = 0;

-    if (VERBOSE_MED)
-        printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
-            mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+    if (VERBOSE_MED) {
+        printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+            mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+        fflush(stdout);
+    }

     /* Select hyperslab in the file */
     filespace = H5Dget_space(dset_id);
     VRFY((filespace >= 0), "File dataspace retrieval succeeded");

-    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+            "Hyperslab selection succeeded");

     /* Fill data buffer */
     data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
@@ -1038,22 +1150,27 @@ test_filtered_dataset_interleaved_write(void)
         data[i] = (C_DATATYPE) GEN_DATA(i);

     for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
-        /* Add Column Index */
-        correct_buf[i] = (C_DATATYPE) ( (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+        /* Add Column Index */
+        correct_buf[i] =
+            (C_DATATYPE) (
+                (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)

-        /* Add the Row Index */
-        + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+                /* Add the Row Index */
+                + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)

-        /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
-        + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
+                /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+                + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))
+            );

     /* Create property list for collective dataset write */
     plist_id = H5Pcreate(H5P_DATASET_XFER);
     VRFY((plist_id >= 0), "DXPL creation succeeded");

-    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+            "Set DXPL MPIO succeeded");

-    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+            "Dataset write succeeded");

     if (data) free(data);
@@ -1066,9 +1183,11 @@
     dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
     VRFY((dset_id >= 0), "Dataset open succeeded");

-    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+            "Dataset read succeeded");

-    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+            "Data verification succeeded");

     if (correct_buf) free(correct_buf);
     if (read_buf) free(read_buf);
@@ -1091,18 +1210,18 @@
  * 02/06/2017
  */
 static void
-test_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
 {
     C_DATATYPE *data = NULL;
     C_DATATYPE *read_buf = NULL;
     C_DATATYPE *correct_buf = NULL;
-    hsize_t dataset_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t count[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t stride[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t block[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
-    hsize_t offset[UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+    hsize_t offset[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
     size_t i, data_size, correct_buf_size;
     hid_t file_id = -1, dset_id = -1, plist_id = -1;
     hid_t filespace = -1, memspace = -1;
@@ -1113,9 +1232,11 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
     VRFY((plist_id >= 0), "FAPL creation succeeded");

-    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+            "Set FAPL MPIO succeeded");

-    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+            "Set libver bounds succeeded");

     file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
     VRFY((file_id >= 0), "Test file open succeeded");
@@ -1123,32 +1244,33 @@
     VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");

     /* Create the dataspace for the dataset */
-    dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
-    dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
-    dataset_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
-    chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
-    chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+    dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+    dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+    dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+    chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+    chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
     chunk_dims[2] = 1;
-    sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
-    sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+    sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+    sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
     sel_dims[2] = 1;

-    filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+    filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
     VRFY((filespace >= 0), "File dataspace creation succeeded");

-    memspace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
+    memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
     VRFY((memspace >= 0), "Memory dataspace creation succeeded");

     /* Create chunked dataset */
     plist_id = H5Pcreate(H5P_DATASET_CREATE);
     VRFY((plist_id >= 0), "DCPL creation succeeded");

-    VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+    VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+            "Chunk size set");

     /* Add test filter to the pipeline */
     VRFY((SET_FILTER(plist_id) >= 0), "Filter set");

-    dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+    dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
             H5P_DEFAULT, plist_id, H5P_DEFAULT);
     VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -1158,28 +1280,31 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
     /* Each process defines the dataset selection in memory and writes
     * it to the hyperslab in the file
     */
-    count[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
-    count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+    count[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+    count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
count[2] = 1; - stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; - stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; stride[2] = 1; - block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; - block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; block[2] = 1; offset[0] = 0; offset[1] = 0; offset[2] = (hsize_t) mpi_rank; - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); @@ -1201,9 +1326,11 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void) plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -1213,12 +1340,14 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -1242,18 +1371,18 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void) * 02/06/2017 */ static void -test_3d_filtered_dataset_no_overlap_same_pages(void) +test_write_3d_filtered_dataset_no_overlap_same_pages(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t 
dataset_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t chunk_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t sel_dims[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t count[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t stride[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t block[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t offset[UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t offset[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id, dset_id, plist_id; hid_t filespace, memspace; @@ -1264,9 +1393,11 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -1274,32 +1405,33 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS; - dataset_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; - dataset_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; - chunk_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; - chunk_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS; + dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; chunk_dims[2] = 1; - sel_dims[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; - sel_dims[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; - sel_dims[2] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + sel_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; - filespace = H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = 
H5Screate_simple(UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -1310,27 +1442,30 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; count[2] = (hsize_t) mpi_size; - stride[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; - stride[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; stride[2] = 1; - block[0] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; - block[1] = (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; block[2] = 1; - offset[0] = ((hsize_t) mpi_rank * (hsize_t) UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); + offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); offset[1] = 0; offset[2] = 0; - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); @@ -1346,15 +1481,20 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE) ((i % (dataset_dims[0] * dataset_dims[1])) 
+ (i / (dataset_dims[0] * dataset_dims[1]))); + correct_buf[i] = (C_DATATYPE) ( + (i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1])) + ); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -1364,12 +1504,14 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -1393,18 +1535,18 @@ test_3d_filtered_dataset_no_overlap_same_pages(void) * 02/06/2017 */ static void -test_3d_filtered_dataset_overlap(void) +test_write_3d_filtered_dataset_overlap(void) { C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t chunk_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t sel_dims[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t count[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t stride[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t block[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t offset[SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t offset[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -1415,9 +1557,11 @@ test_3d_filtered_dataset_overlap(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], 
H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -1425,32 +1569,33 @@ test_3d_filtered_dataset_overlap(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NROWS; - dataset_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NCOLS; - dataset_dims[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH; - chunk_dims[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NROWS; - chunk_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS; + dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS; + dataset_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; + chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; chunk_dims[2] = 1; - sel_dims[0] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); - sel_dims[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_NCOLS; - sel_dims[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH; + sel_dims[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); + sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS; + sel_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; - filespace = H5Screate_simple(SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -1460,28 +1605,31 @@ test_3d_filtered_dataset_overlap(void) /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ - count[0] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NROWS / SHARED_FILTERED_CHUNKS_3D_CH_NROWS); - count[1] = (hsize_t) (SHARED_FILTERED_CHUNKS_3D_NCOLS / SHARED_FILTERED_CHUNKS_3D_CH_NCOLS); - count[2] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_DEPTH; - stride[0] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NROWS; - stride[1] = (hsize_t) SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + count[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS); + count[1] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS); + count[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; + stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; stride[2] = 1; block[0] = 1; - block[1] = (hsize_t) 
SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; block[2] = 1; offset[0] = (hsize_t) mpi_rank; offset[1] = 0; offset[2] = 0; - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, + count, block) >= 0), "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); @@ -1497,22 +1645,29 @@ test_3d_filtered_dataset_overlap(void) data[i] = (C_DATATYPE) GEN_DATA(i); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - /* Add the Column Index */ - correct_buf[i] = (C_DATATYPE) ( (i % (hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS)) + /* Add the Column Index */ + correct_buf[i] = + (C_DATATYPE) ( + (i % (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) - /* Add the Row Index */ - + ((i % (hsize_t) (mpi_size * SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS)) / (hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS)) + /* Add the Row Index */ + + ((i % (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + / (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) - /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */ - + ((hsize_t) (SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS) * (i / (hsize_t) (mpi_size * SHARED_FILTERED_CHUNKS_3D_DEPTH * SHARED_FILTERED_CHUNKS_3D_NCOLS)))); + /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */ + + ((hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) + * (i / (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))) + ); /* Create property list for collective dataset write */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -1522,12 +1677,14 @@ test_3d_filtered_dataset_overlap(void) read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); VRFY((NULL != read_buf), "calloc succeeded"); - dset_id = H5Dopen2(file_id, "/" SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" 
WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); if (correct_buf) free(correct_buf); if (read_buf) free(read_buf); @@ -1551,16 +1708,16 @@ test_3d_filtered_dataset_overlap(void) */ /* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void -test_cmpd_filtered_dataset_no_conversion_unshared(void) +test_write_cmpd_filtered_dataset_no_conversion_unshared(void) { cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t count[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t stride[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t block[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t offset[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; size_t i; hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1; hid_t filespace = -1, memspace = -1; @@ -1571,9 +1728,11 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -1581,24 +1740,25 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS; - dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; - chunk_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; - sel_dims[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; - 
- filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; + chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); @@ -1607,12 +1767,16 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + "Datatype insertion succeeded"); - dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace, + dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -1623,30 +1787,33 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; - stride[0] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - stride[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; - block[0] = 
COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - block[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; offset[0] = 0; - offset[1] = ((hsize_t) mpi_rank * COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); + offset[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); + data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); VRFY((NULL != data), "calloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC); - for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { + memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC); + for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); @@ -1657,9 +1824,11 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -1683,16 +1852,16 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void) */ /* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void -test_cmpd_filtered_dataset_no_conversion_shared(void) +test_write_cmpd_filtered_dataset_no_conversion_shared(void) { cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t 
count[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t stride[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t block[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t offset[COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; size_t i; hid_t file_id, dset_id, plist_id, memtype; hid_t filespace, memspace; @@ -1701,11 +1870,13 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) /* Set up file access property list with parallel I/O access */ plist_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((plist_id>= 0), "FAPL creation succeeded"); + VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -1713,24 +1884,25 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS; - dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; - chunk_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; - chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; - sel_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; - sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; - - filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; + chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), 
"Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); @@ -1739,12 +1911,16 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + "Datatype insertion succeeded"); - dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace, + dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); @@ -1755,30 +1931,33 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; - stride[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; - stride[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; - block[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; - block[1] = COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; + block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; offset[0] = (hsize_t) mpi_rank; offset[1] = 0; - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the file */ filespace = H5Dget_space(dset_id); 
VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); + data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); VRFY((NULL != data), "calloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC); - for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { + memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC); + for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); @@ -1789,9 +1968,11 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -1820,16 +2001,16 @@ test_cmpd_filtered_dataset_no_conversion_shared(void) */ /* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void -test_cmpd_filtered_dataset_type_conversion_unshared(void) +test_write_cmpd_filtered_dataset_type_conversion_unshared(void) { cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t count[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t stride[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t block[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t offset[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; size_t i; hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1; hid_t filespace = -1, memspace = -1; @@ -1840,9 +2021,11 @@ 
test_cmpd_filtered_dataset_type_conversion_unshared(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -1850,24 +2033,25 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - dataset_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS; - dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; - chunk_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; - sel_dims[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; - - filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); + dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; + chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); + memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); /* Create chunked dataset */ plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); @@ -1876,22 +2060,30 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void) memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", 
HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + "Datatype insertion succeeded"); /* Create the compound type for file. */ filetype = H5Tcreate(H5T_COMPOUND, 32); VRFY((filetype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), "Datatype insertion succeeded"); - - dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), + "Datatype insertion succeeded"); + + dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); @@ -1901,30 +2093,33 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void) * it to the hyperslab in the file */ count[0] = 1; - count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; - stride[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - stride[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; - block[0] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - block[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; offset[0] = 0; - offset[1] = ((hsize_t) mpi_rank * COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); + offset[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); - if (VERBOSE_MED) + if (VERBOSE_MED) { printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + fflush(stdout); + } /* Select hyperslab in the 
file */
     filespace = H5Dget_space(dset_id);
     VRFY((filespace >= 0), "File dataspace retrieval succeeded");

-    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0),
+            "Hyperslab selection succeeded");

-    data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+    data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
     VRFY((NULL != data), "calloc succeeded");

     /* Fill data buffer */
-    memset(data, 0, sizeof(cmpd_filtered_t) * (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC);
-    for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+    memset(data, 0, sizeof(cmpd_filtered_t) * (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC);
+    for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
         data[i].field1 = (short) GEN_DATA(i);
         data[i].field2 = (int) GEN_DATA(i);
         data[i].field3 = (long) GEN_DATA(i);
@@ -1935,11 +2130,13 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
     plist_id = H5Pcreate(H5P_DATASET_XFER);
     VRFY((plist_id >= 0), "DXPL creation succeeded");

-    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+            "Set DXPL MPIO succeeded");

     /* Ensure that this test currently fails since type conversions break collective mode */
     H5E_BEGIN_TRY {
-        VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0), "Dataset write succeeded");
+        VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+                "Dataset write failed as expected");
     } H5E_END_TRY;

     if (data) free(data);
@@ -1970,16 +2167,16 @@
  */
 /* JTH: This test currently cannot be data-verified due to the floating-point data involved */
 static void
-test_cmpd_filtered_dataset_type_conversion_shared(void)
+test_write_cmpd_filtered_dataset_type_conversion_shared(void)
 {
     cmpd_filtered_t *data = NULL;
-    hsize_t dataset_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t chunk_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t sel_dims[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t count[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t stride[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t block[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
-    hsize_t offset[COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+    hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
     size_t i;
     hid_t file_id, dset_id, plist_id, filetype, memtype;
     hid_t filespace, memspace;
@@ -1990,9 +2187,11 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
     VRFY((plist_id >= 0), "FAPL creation succeeded");

-    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+    VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+            "Set FAPL MPIO succeeded");

-    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+    VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+            "Set libver bounds succeeded");

     file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
     VRFY((file_id >= 0), "Test file open succeeded");
@@ -2000,24 +2199,25 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");

     /* Create the dataspace for the dataset */
-    dataset_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
-    dataset_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
-    chunk_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
-    chunk_dims[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
-    sel_dims[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
-    sel_dims[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
-
-    filespace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+    dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+    dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+    chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+    chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+    sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+    sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+    filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
     VRFY((filespace >= 0), "File dataspace creation succeeded");

-    memspace = H5Screate_simple(COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+    memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
     VRFY((memspace >= 0), "Memory dataspace creation succeeded");

     /* Create chunked dataset */
     plist_id = H5Pcreate(H5P_DATASET_CREATE);
     VRFY((plist_id >= 0), "DCPL creation succeeded");

-    VRFY((H5Pset_chunk(plist_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+    VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+            "Chunk size set");

     /* Add test filter to the pipeline */
     VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
@@ -2026,21 +2226,29 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t));
     VRFY((memtype >= 0), "Datatype creation succeeded");

-    VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), "Datatype insertion succeeded");
+    VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0),
+            "Datatype insertion succeeded");

     /* Create the compound type for file. */
     filetype = H5Tcreate(H5T_COMPOUND, 32);
     VRFY((filetype >= 0), "Datatype creation succeeded");

-    VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
-    VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), "Datatype insertion succeeded");
+    VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+            "Datatype insertion succeeded");
+    VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0),
+            "Datatype insertion succeeded");

-    dset_id = H5Dcreate2(file_id, COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
+    dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
             H5P_DEFAULT, plist_id, H5P_DEFAULT);
     VRFY((dset_id >= 0), "Dataset creation succeeded");
@@ -2051,30 +2259,33 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     * it to the hyperslab in the file
     */
     count[0] = 1;
-    count[1] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
-    stride[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
-    stride[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
-    block[0] = (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
-    block[1] = COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+    count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+    stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+    stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+    block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+    block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
     offset[0] = (hsize_t) mpi_rank;
     offset[1] = 0;

-    if (VERBOSE_MED)
+    if (VERBOSE_MED) {
         printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n",
-            mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+                mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]);
+        fflush(stdout);
+    }

     /* Select hyperslab in the file */
     filespace = H5Dget_space(dset_id);
     VRFY((filespace >= 0), "File 
dataspace retrieval succeeded");
 
-    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded");
+    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0),
+         "Hyperslab selection succeeded");
 
-    data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+    data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
     VRFY((NULL != data), "calloc succeeded");
 
     /* Fill data buffer */
-    memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC);
-    for (i = 0; i < (hsize_t) COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+    memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC);
+    for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
         data[i].field1 = (short) GEN_DATA(i);
         data[i].field2 = (int) GEN_DATA(i);
         data[i].field3 = (long) GEN_DATA(i);
@@ -2085,11 +2296,13 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     plist_id = H5Pcreate(H5P_DATASET_XFER);
     VRFY((plist_id >= 0), "DXPL creation succeeded");
 
-    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+         "Set DXPL MPIO succeeded");
 
     /* Ensure that this test currently fails since type conversions break collective mode */
     H5E_BEGIN_TRY {
-        VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0), "Dataset write succeeded");
+        VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+             "Dataset write failed as expected");
     } H5E_END_TRY;
 
     if (data) free(data);
@@ -2106,40 +2319,58 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
 }
 
 /*
- * Tests write of filtered data to a dataset
- * by a single process. After the write has
- * succeeded, the dataset is closed and then
- * re-opened in parallel and read by all
- * processes to ensure data correctness.
+ * Tests parallel read of filtered data in the special
+ * case where a dataset is composed of a single chunk.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the single chunk and contributes its piece to a
+ * global buffer that is checked for consistency.
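+ *
+ * As a rough sketch of the decomposition used below (each rank
+ * takes an equal horizontal strip of the lone chunk and reads it
+ * into a flat 1-D memory buffer; NROWS/NCOLS abbreviate the
+ * READ_ONE_CHUNK_FILTERED_DATASET_* macros):
+ *
+ *     sel_dims[0] = NROWS / mpi_size;       sel_dims[1] = NCOLS;
+ *     start[0] = mpi_rank * sel_dims[0];    start[1] = 0;
+ *     block[0] = sel_dims[0];               block[1] = sel_dims[1];
+ *     count[0] = count[1] = 1;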
* * Programmer: Jordan Henderson - * 08/03/2017 + * 05/14/2018 */ static void -test_write_serial_read_parallel(void) +test_read_one_chunk_filtered_dataset(void) { - C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t start[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t stride[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; - hid_t filespace = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; - if (MAINPROCESS) puts("Testing write file serially; read file in parallel"); + dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; - dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS; - dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS; - dataset_dims[2] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_DEPTH; + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = ((C_DATATYPE) i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); - /* Write the file on the MAINPROCESS rank */ if (MAINPROCESS) { - /* Set up file access property list */ + puts("Testing read from one-chunk filtered dataset"); + plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2147,85 +2378,135 @@ test_write_serial_read_parallel(void) VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); /* Create the dataspace for the dataset */ - chunk_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NROWS; - chunk_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NCOLS; - chunk_dims[2] = 1; - - filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL); + filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + plist_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((plist_id >= 0), "DCPL creation succeeded"); - VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, 
chunk_dims) >= 0), "Chunk size set"); + VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); /* Add test filter to the pipeline */ VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); - dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data); - - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); - - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE) GEN_DATA(i); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), "Dataset write succeeded"); - - if (data) free(data); + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); - - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (long) i; - - /* All ranks open the file and verify their "portion" of the dataset is correct */ + /* Set up file access property list with parallel I/O access */ plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); - file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); - dset_id = H5Dopen2(file_id, "/" WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); + sel_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t) 
READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + block[0] = sel_dims[0]; + block[1] = sel_dims[1]; + start[0] = ((hsize_t) mpi_rank * sel_dims[0]); + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded"); + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - if (correct_buf) free(correct_buf); + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) flat_dims[0]; + + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2233,122 +2514,1494 @@ test_write_serial_read_parallel(void) } /* - * Tests parallel write of filtered data - * to a dataset. After the write has - * succeeded, the dataset is closed and - * then re-opened and read by a single - * process to ensure data correctness. + * Tests parallel read of filtered data in the case where only + * one process is reading from a particular chunk in the operation. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * the dataset and contributes its piece to a global buffer + * that is checked for consistency. 
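+ *
+ * As a rough sketch, each rank selects its own full row of chunks
+ * (CH_NROWS/CH_NCOLS/NCOLS abbreviate the
+ * READ_UNSHARED_FILTERED_CHUNKS_* macros):
+ *
+ *     count[0]  = 1;                    count[1]  = NCOLS / CH_NCOLS;
+ *     stride[0] = CH_NROWS;             stride[1] = CH_NCOLS;
+ *     block[0]  = CH_NROWS;             block[1]  = CH_NCOLS;
+ *     start[0]  = mpi_rank * CH_NROWS;  start[1]  = 0;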
* * Programmer: Jordan Henderson - * 08/03/2017 + * 05/15/2018 */ static void -test_write_parallel_read_serial(void) +test_read_filtered_dataset_no_overlap(void) { - C_DATATYPE *data = NULL; C_DATATYPE *read_buf = NULL; C_DATATYPE *correct_buf = NULL; - hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t count[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; - if (MAINPROCESS) puts("Testing write file in parallel; read serially"); + dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE) ( + (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + ); + + if (MAINPROCESS) { + puts("Testing read from unshared filtered chunks"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close 
succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } /* Set up file access property list with parallel I/O access */ plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); - file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); - /* Create the dataspace for the dataset */ - dataset_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NROWS; - dataset_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS; - dataset_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH; - chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; - chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; - chunk_dims[2] = 1; - sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; - sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS; - sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH; + dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); - filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); + sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS; - memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL); + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((plist_id >= 0), "DCPL creation succeeded"); + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + /* + * Each process defines the dataset selection in the file and reads + * it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ 
%llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } - /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); - dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); - VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); - /* Each process defines the dataset selection in memory and writes - * it to the hyperslab in the file - */ - count[0] = 1; - count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; - count[2] = (hsize_t) mpi_size; - stride[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; - stride[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; - stride[2] = 1; - block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; - block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; - block[2] = 1; - offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]); - offset[1] = 0; - offset[2] = 0; + read_buf_size = flat_dims[0] * sizeof(*read_buf); - if (VERBOSE_MED) - printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), "Hyperslab selection succeeded"); + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); - /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) flat_dims[0]; - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE) GEN_DATA(i); + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - /* Create property list for collective dataset write */ - plist_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((plist_id >= 0), "DXPL creation succeeded"); + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0]); - VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), 
"Set DXPL MPIO succeeded"); + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * more than one process is reading from a particular chunk + * in the operation. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each chunk of the dataset and contributes its pieces + * to a global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_overlap(void) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + if (MAINPROCESS) { + puts("Testing read from shared filtered chunks"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) 
READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + count[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size; + block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t) mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = 
flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset. + */ + { + size_t loop_count = count[0]; + size_t total_recvcounts = 0; + + recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) { + recvcounts[i] = (int) dataset_dims[1]; + total_recvcounts += (size_t) recvcounts[i]; + } + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * dataset_dims[1]); + + for (; loop_count; loop_count--) { + VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * a single process in the read operation has no selection + * in the dataset's dataspace. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank (except for one) + * reads a part of the dataset and contributes its piece + * to a global buffer that is checked for consistency. 
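+ *
+ * The excluded rank still participates in the collective calls;
+ * it simply contributes nothing, roughly (NO_SELECT_PROC
+ * abbreviates the corresponding *_NO_SELECT_PROC macro):
+ *
+ *     if (mpi_rank == NO_SELECT_PROC)
+ *         H5Sselect_none(filespace);
+ *
+ *     recvcounts[NO_SELECT_PROC] = 0;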
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_single_no_selection(void) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + size_t segment_length; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = + (C_DATATYPE) ( + (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1])) + ); + + /* Compute the correct offset into the buffer for the process having no selection and clear it */ + segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size; + HDmemset(correct_buf + ((size_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), + 0, segment_length * sizeof(*correct_buf)); + + if (MAINPROCESS) { + puts("Testing read from filtered chunks with a single process having no selection"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, 
HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + sel_dims[0] = sel_dims[1] = 0; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t) mpi_rank * (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + else + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + 
recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS); + recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0; + + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * (size_t) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS)); + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + else + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * no process in the read operation has a selection in the + * dataset's dataspace. This test is to ensure that there + * are no assertion failures or similar issues due to size + * 0 allocations and the like. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will simply issue + * a no-op read. 
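+ *
+ * Roughly, every rank performs the same no-op read on this
+ * 2-D dataset:
+ *
+ *     sel_dims[0] = sel_dims[1] = 0;
+ *     memspace = H5Screate_simple(2, sel_dims, NULL);
+ *     H5Sselect_none(filespace);
+ *     H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace,
+ *             plist_id, read_buf);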
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_all_no_selection(void) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + + dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + if (MAINPROCESS) { + puts("Testing read from filtered chunks with all processes having no selection"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = sel_dims[1] = 0; + + memspace = 
H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data by using point + * selections instead of hyperslab selections. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will read part + * of the dataset using a point selection and will + * contribute its piece to a global buffer that is + * checked for consistency. + * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_point_selection(void) +{ + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t *coords = NULL; + hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, j, read_buf_size, correct_buf_size; + size_t num_points; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + if (MAINPROCESS) { + puts("Testing read from filtered chunks with point selection"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create 
the dataspace for the dataset */ + filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Set up point selection */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + num_points = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size; + coords = (hsize_t *) calloc(1, 2 * num_points * sizeof(*coords)); + VRFY((NULL != coords), "Coords calloc succeeded"); + + for (i = 0; i < num_points; i++) + for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) + coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = + (j > 0) ? 
(i % (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS) + : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS))); + + VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t ) num_points, (const hsize_t * ) coords) >= 0), + "Point selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset. + */ + { + size_t original_loop_count = dataset_dims[0] / (hsize_t) mpi_size; + size_t cur_loop_count = original_loop_count; + size_t total_recvcounts = 0; + + recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) { + recvcounts[i] = (int) dataset_dims[1]; + total_recvcounts += (size_t) recvcounts[i]; + } + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * dataset_dims[1]); + + for (; cur_loop_count; cur_loop_count--) { + VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * each process reads an equal amount of data from each + * chunk in the dataset. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will read part + * of each chunk of the dataset and will contribute its + * pieces to a global buffer that is checked for consistency. 
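+ *
+ * Verification follows the same pattern as the other read tests,
+ * roughly:
+ *
+ *     H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace,
+ *             plist_id, read_buf);
+ *     MPI_Allgatherv(read_buf, ..., global_buf, recvcounts, displs,
+ *                    C_DATATYPE_MPI, comm);
+ *     VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ *          "Data verification succeeded");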
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_interleaved_read(void) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t start[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t stride[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add Column Index */ + correct_buf[i] = + (C_DATATYPE) ( + (i % (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + + /* Add the Row Index */ + + ((i % (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + + /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */ + + ((hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS))) + ); + + if (MAINPROCESS) { + puts("Testing interleaved read from filtered chunks"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } 
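+ + /* + * Selection sketch (illustrative values only, assuming mpi_size == 2, with + * DIM0_SCALE_FACTOR of 4 giving an 8-row dataset and chunks 2 rows tall): + * each rank selects row 'mpi_rank' of every chunk in the hyperslab + * selection below, so rank 0 reads global rows 0, 2, 4 and 6 while rank 1 + * reads rows 1, 3, 5 and 7 into its flat read buffer. Row k of a rank's + * read_buf therefore holds global row (k * mpi_size) + mpi_rank, which is + * the property the MPI_Allgatherv rounds at the end of this test rely on + * to rebuild the full dataset in global_buf. + */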
+ + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size); + sel_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it into the selection in memory + */ + count[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS); + count[1] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS); + stride[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + start[0] = (hsize_t) mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) calloc(1, read_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != global_buf), "calloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset.
+ */ + { + size_t loop_count = count[0]; + size_t total_recvcounts = 0; + + recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + + displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) { + recvcounts[i] = (int) dataset_dims[1]; + total_recvcounts += (size_t) recvcounts[i]; + } + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * dataset_dims[1]); + + for (; loop_count; loop_count--) { + VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) free(displs); + if (recvcounts) free(recvcounts); + if (global_buf) free(global_buf); + if (read_buf) free(read_buf); + if (correct_buf) free(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +static void +test_read_3d_filtered_dataset_no_overlap_separate_pages(void) +{ + +} + +static void +test_read_3d_filtered_dataset_no_overlap_same_pages(void) +{ + +} + +static void +test_read_3d_filtered_dataset_overlap(void) +{ + +} + +static void +test_read_cmpd_filtered_dataset_no_conversion_unshared(void) +{ + +} + +static void +test_read_cmpd_filtered_dataset_no_conversion_shared(void) +{ + +} + +static void +test_read_cmpd_filtered_dataset_type_conversion_unshared(void) +{ + +} + +static void +test_read_cmpd_filtered_dataset_type_conversion_shared(void) +{ + +} + +/* + * Tests write of filtered data to a dataset + * by a single process. After the write has + * succeeded, the dataset is closed and then + * re-opened in parallel and read by all + * processes to ensure data correctness. 
+ * + * Programmer: Jordan Henderson + * 08/03/2017 + */ +static void +test_write_serial_read_parallel(void) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1; + + if (MAINPROCESS) puts("Testing write file serially; read file in parallel"); + + dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS; + dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS; + dataset_dims[2] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_DEPTH; + + /* Write the file on the MAINPROCESS rank */ + if (MAINPROCESS) { + /* Set up file access property list */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + chunk_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NCOLS; + chunk_dims[2] = 1; + + filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data); + + data = (C_DATATYPE *) calloc(1, data_size); + VRFY((NULL != data), "calloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE) GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), + "Dataset write succeeded"); + + if (data) free(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "calloc succeeded"); + + read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); + VRFY((NULL != read_buf), "calloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (long) i; + + /* All ranks open the file and verify their "portion" of the dataset is correct */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + 
"Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (correct_buf) free(correct_buf); + if (read_buf) free(read_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data + * to a dataset. After the write has + * succeeded, the dataset is closed and + * then re-opened and read by a single + * process to ensure data correctness. + * + * Programmer: Jordan Henderson + * 08/03/2017 + */ +static void +test_write_parallel_read_serial(void) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t count[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + + if (MAINPROCESS) puts("Testing write file in parallel; read serially"); + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NROWS; + dataset_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS; + dataset_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH; + chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + chunk_dims[2] = 1; + sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS; + sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH; + + filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = 
H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + count[2] = (hsize_t) mpi_size; + stride[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + stride[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + block[2] = 1; + offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]); + offset[1] = 0; + offset[2] = 0; + + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + fflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, + count, block) >= 0), "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + + data = (C_DATATYPE *) calloc(1, data_size); + VRFY((NULL != data), "calloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE) GEN_DATA(i); + + /* Create property list for collective dataset write */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), + "Dataset write succeeded"); if (data) free(data); @@ -2362,7 +4015,8 @@ test_write_parallel_read_serial(void) plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); - VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2381,11 +4035,16 @@ test_write_parallel_read_serial(void) VRFY((NULL != read_buf), "calloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE) ((i % (dataset_dims[0] * dataset_dims[1])) + (i / (dataset_dims[0] * dataset_dims[1])));; + correct_buf[i] = (C_DATATYPE) ( + (i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1])) + ); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, 
H5P_DEFAULT, read_buf) >= 0), "Dataset read succeeded"); + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0), + "Dataset read succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2416,7 +4075,9 @@ main(int argc, char** argv) } if (H5dont_atexit() < 0) { - printf("Failed to turn off atexit processing. Continue.\n"); + if (MAINPROCESS) { + printf("Failed to turn off atexit processing. Continue.\n"); + } } H5open(); @@ -2437,9 +4098,11 @@ main(int argc, char** argv) VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded"); - VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); + VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); - VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL), "Test file name created"); + VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL), + "Test file name created"); file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((file_id >= 0), "Test file creation succeeded"); @@ -2449,7 +4112,8 @@ main(int argc, char** argv) for (i = 0; i < ARRAY_SIZE(tests); i++) { if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) { (*tests[i])(); - } else { + } + else { if (MAINPROCESS) MESG("MPI_Barrier failed"); nerrors++; } @@ -2461,7 +4125,9 @@ main(int argc, char** argv) exit: if (nerrors) - if (MAINPROCESS) printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : ""); + if (MAINPROCESS) + printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, + nerrors > 1 ? 
"S" : ""); ALARM_OFF; diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index cb9a1ab..43d471a 100644 --- a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -43,16 +43,19 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES; /* Common defines for all tests */ #define C_DATATYPE long +#define C_DATATYPE_MPI MPI_LONG #define COMPOUND_C_DATATYPE cmpd_filtered_t #define C_DATATYPE_STR(type) STRINGIFY(type) #define HDF5_DATATYPE_NAME H5T_NATIVE_LONG +/* Macro used to generate data for datasets for later verification */ #define GEN_DATA(i) INCREMENTAL_DATA(i) -#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */ /* For experimental purposes only, will cause tests to fail data verification phase - JTH */ /* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon selected mode */ -#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */ + +#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */ +#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */ #ifdef DYNAMIC_FILTER #define SET_FILTER(dcpl) H5Pset_filter(dcpl, filter_id, flags, FILTER_NUM_CDVALUES, cd_values) /* Test other filter in parallel */ @@ -63,57 +66,57 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES; #define DIM0_SCALE_FACTOR 4 #define DIM1_SCALE_FACTOR 2 -/* Defines for the one-chunk filtered dataset test */ -#define ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset" -#define ONE_CHUNK_FILTERED_DATASET_DIMS 2 -#define ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ -#define ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ -#define ONE_CHUNK_FILTERED_DATASET_CH_NROWS ONE_CHUNK_FILTERED_DATASET_NROWS -#define ONE_CHUNK_FILTERED_DATASET_CH_NCOLS ONE_CHUNK_FILTERED_DATASET_NCOLS +/* Defines for the one-chunk filtered dataset write test */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write" +#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2 +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS +#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS /* Defines for the unshared filtered chunks write test */ -#define UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks" -#define UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2 -#define UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) -#define UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) -#define UNSHARED_FILTERED_CHUNKS_CH_NROWS (UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size) -#define UNSHARED_FILTERED_CHUNKS_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * 
DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size) /* Defines for the shared filtered chunks write test */ -#define SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks" -#define SHARED_FILTERED_CHUNKS_DATASET_DIMS 2 -#define SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size) -#define SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size) -#define SHARED_FILTERED_CHUNKS_NROWS (SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR) -#define SHARED_FILTERED_CHUNKS_NCOLS (SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write" +#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) /* Defines for the filtered chunks write test where a process has no selection */ -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks" -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) -#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write" +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1) /* Defines for the filtered chunks write test where no process has a selection */ -#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks" -#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 -#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) -#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) -#define ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) -#define ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write" +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS 
(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) /* Defines for the filtered chunks write test with a point selection */ -#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks" -#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 -#define POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) -#define POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) -#define POINT_SELECTION_FILTERED_CHUNKS_NROWS (POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) -#define POINT_SELECTION_FILTERED_CHUNKS_NCOLS (POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_write" +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) /* Defines for the filtered dataset interleaved write test */ -#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "interleaved_write_filtered_dataset" +#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write" #define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2 #define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size) #define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR) @@ -121,31 +124,31 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES; #define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR) /* Defines for the 3D unshared filtered dataset separate page write test */ -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages" -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3 -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size) -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size) -#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3 +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size) /* Defines for the 3D unshared filtered dataset same page write test */ -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages" -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3 -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) -#define 
UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size) -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size) -#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3 +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size) /* Defines for the 3d shared filtered dataset write test */ -#define SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks" -#define SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3 -#define SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size) -#define SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR) -#define SHARED_FILTERED_CHUNKS_3D_NROWS (SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR) -#define SHARED_FILTERED_CHUNKS_3D_NCOLS (SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) -#define SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_write" +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3 +#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) /* Struct type for the compound datatype filtered dataset tests */ typedef struct { @@ -156,40 +159,97 @@ typedef struct { } COMPOUND_C_DATATYPE; /* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */ -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion" -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size) +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC 
(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size) /* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */ -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion" -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1 -#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS /* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */ -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion" -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size) +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size) /* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */ -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion" -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1 -#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS +#define 
WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS + +/* Defines for the one-chunk filtered dataset read test */ +#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read" +#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2 +#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS +#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS + +/* Defines for the unshared filtered chunks read test */ +#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_read" +#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size) + +/* Defines for the shared filtered chunks read test */ +#define READ_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_read" +#define READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size) +#define READ_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size) +#define READ_SHARED_FILTERED_CHUNKS_NROWS (READ_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR) +#define READ_SHARED_FILTERED_CHUNKS_NCOLS (READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + +/* Defines for the filtered chunks read test where a process has no selection */ +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read" +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1) + +/* Defines for the filtered chunks read test where no process has a selection */ +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read" +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS 
(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered chunks read test with a point selection */ +#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_read" +#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered dataset interleaved read test */ +#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read" +#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2 +#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size) +#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR) +#define INTERLEAVED_READ_FILTERED_DATASET_NROWS (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR) +#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR) /* Defines for the write file serially/read in parallel test */ #define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel" -- cgit v0.12 From 2c8e6e630453249acba3d528d471602bb26462a8 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Wed, 16 May 2018 20:29:35 -0500 Subject: Add data verification to parallel filtered compound write tests Add 3D parallel filtered partial read tests --- testpar/t_filters_parallel.c | 1472 +++++++++++++++++++++++++++++++----------- testpar/t_filters_parallel.h | 28 +- 2 files changed, 1137 insertions(+), 363 deletions(-) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 30c048d..886b544 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -216,11 +216,11 @@ test_write_one_chunk_filtered_dataset(void) * (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -239,13 +239,13 @@ test_write_one_chunk_filtered_dataset(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -256,8 +256,8 @@ test_write_one_chunk_filtered_dataset(void) VRFY((0 == 
memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -371,11 +371,11 @@ test_write_filtered_dataset_no_overlap(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -397,13 +397,13 @@ test_write_filtered_dataset_no_overlap(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -414,8 +414,8 @@ test_write_filtered_dataset_no_overlap(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -530,11 +530,11 @@ test_write_filtered_dataset_overlap(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -556,13 +556,13 @@ test_write_filtered_dataset_overlap(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -573,8 +573,8 @@ 
test_write_filtered_dataset_overlap(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -698,11 +698,11 @@ test_write_filtered_dataset_single_no_selection(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -729,13 +729,13 @@ test_write_filtered_dataset_single_no_selection(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -746,8 +746,8 @@ test_write_filtered_dataset_single_no_selection(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -840,11 +840,11 @@ test_write_filtered_dataset_all_no_selection(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -859,13 +859,13 @@ test_write_filtered_dataset_all_no_selection(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" 
WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -876,8 +876,8 @@ test_write_filtered_dataset_all_no_selection(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -963,8 +963,8 @@ test_write_filtered_dataset_point_selection(void) VRFY((filespace >= 0), "File dataspace retrieval succeeded"); num_points = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size; - coords = (hsize_t *) calloc(1, 2 * num_points * sizeof(*coords)); - VRFY((NULL != coords), "Coords calloc succeeded"); + coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords)); + VRFY((NULL != coords), "Coords HDcalloc succeeded"); for (i = 0; i < num_points; i++) for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) @@ -979,11 +979,11 @@ test_write_filtered_dataset_point_selection(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -1005,13 +1005,13 @@ test_write_filtered_dataset_point_selection(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -1022,9 +1022,9 @@ test_write_filtered_dataset_point_selection(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (coords) free(coords); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (coords) HDfree(coords); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1140,11 +1140,11 @@ test_write_filtered_dataset_interleaved_write(void) data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), 
"calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -1172,13 +1172,13 @@ test_write_filtered_dataset_interleaved_write(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -1189,8 +1189,8 @@ test_write_filtered_dataset_interleaved_write(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1218,10 +1218,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; - hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; - hsize_t offset[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -1289,13 +1289,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; block[2] = 1; - offset[0] = 0; - offset[1] = 0; - offset[2] = (hsize_t) mpi_rank; + start[0] = 0; + start[1] = 0; + start[2] = (hsize_t) mpi_rank; if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]); fflush(stdout); } @@ -1303,18 +1303,18 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, 
H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -1332,13 +1332,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -1349,8 +1349,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1379,10 +1379,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - hsize_t offset[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id, dset_id, plist_id; hid_t filespace, memspace; @@ -1450,13 +1450,13 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; block[2] = 1; - offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); - offset[1] = 0; - offset[2] = 0; + start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); + start[1] = 0; + start[2] = 0; if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], 
stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]); fflush(stdout); } @@ -1464,18 +1464,18 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -1496,13 +1496,13 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -1513,8 +1513,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1543,10 +1543,10 @@ test_write_3d_filtered_dataset_overlap(void) hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - hsize_t offset[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; size_t i, data_size, correct_buf_size; hid_t file_id = -1, dset_id = -1, plist_id = -1; hid_t filespace = -1, memspace = -1; @@ -1614,13 +1614,13 @@ test_write_3d_filtered_dataset_overlap(void) block[0] = 1; block[1] = (hsize_t) 
WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; block[2] = 1; - offset[0] = (hsize_t) mpi_rank; - offset[1] = 0; - offset[2] = 0; + start[0] = (hsize_t) mpi_rank; + start[1] = 0; + start[2] = 0; if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]); + printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]); fflush(stdout); } @@ -1628,18 +1628,18 @@ test_write_3d_filtered_dataset_overlap(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -1669,13 +1669,13 @@ test_write_3d_filtered_dataset_overlap(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset open succeeded"); @@ -1686,8 +1686,8 @@ test_write_3d_filtered_dataset_overlap(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1706,21 +1706,22 @@ test_write_3d_filtered_dataset_overlap(void) * Programmer: Jordan Henderson * 02/10/2017 */ -/* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void) { - cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t 
sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - size_t i; - hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1; - hid_t filespace = -1, memspace = -1; + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1; + hid_t filespace = -1, memspace = -1; if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion"); @@ -1764,16 +1765,14 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ - memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), - "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace, @@ -1792,12 +1791,12 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; - offset[0] = 0; - offset[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); + start[0] = 0; + start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], 
count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); fflush(stdout); } @@ -1805,19 +1804,39 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC); for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); - data[i].field4 = (double) GEN_DATA(i); + } + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); } /* Create property list for collective dataset write */ @@ -1830,7 +1849,25 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1854,17 +1891,19 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) static void test_write_cmpd_filtered_dataset_no_conversion_shared(void) { - cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t 
sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - size_t i; - hid_t file_id, dset_id, plist_id, memtype; - hid_t filespace, memspace; + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id, dset_id, plist_id, memtype; + hid_t filespace, memspace; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion"); @@ -1908,16 +1947,14 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ - memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), - "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace, @@ -1936,12 +1973,12 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; - offset[0] = (hsize_t) mpi_rank; - offset[1] = 0; + start[0] = (hsize_t) mpi_rank; + start[1] = 0; if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + 
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); fflush(stdout); } @@ -1949,19 +1986,42 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC); for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); - data[i].field4 = (double) GEN_DATA(i); + } + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); } /* Create property list for collective dataset write */ @@ -1974,7 +2034,25 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1991,29 +2069,30 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) * chunks using a compound datatype which requires a * datatype conversion. 
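 *
 * (Illustrative sketch only: the conversion is forced by pairing native
 * member types in memory with fixed big-endian member types in the file,
 * exactly as the H5Tinsert calls in this test do, e.g.
 *
 *     H5Tinsert(memtype,  "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT);
 *     H5Tinsert(filetype, "IntData", 8, H5T_STD_I32BE);
 *
 * so on typical little-endian platforms every element must pass through
 * datatype conversion before the filter pipeline can run.)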
 *
- * This test currently should fail because the datatype
- * conversion causes the parallel library to break
- * to independent I/O and this isn't allowed when
+ * NOTE: This test currently should fail because the
+ * datatype conversion causes the parallel library to
+ * break to independent I/O and this isn't allowed when
  * there are filters in the pipeline.
  *
  * Programmer: Jordan Henderson
  *             02/07/2017
  */
-/* JTH: This test currently cannot be data-verified due to the floating-point data involved */
 static void
 test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
 {
-    cmpd_filtered_t *data = NULL;
-    hsize_t          dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    hsize_t          offset[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
-    size_t           i;
-    hid_t            file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
-    hid_t            filespace = -1, memspace = -1;
+    COMPOUND_C_DATATYPE *data = NULL;
+    COMPOUND_C_DATATYPE *read_buf = NULL;
+    COMPOUND_C_DATATYPE *correct_buf = NULL;
+    hsize_t              dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    hsize_t              block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+    size_t               i, correct_buf_size;
+    hid_t                file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+    hid_t                filespace = -1, memspace = -1;

     if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");

@@ -2057,16 +2136,14 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)

     VRFY((SET_FILTER(plist_id) >= 0), "Filter set");

     /* Create the compound type for memory.
*/ - memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), - "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); /* Create the compound type for file. */ @@ -2079,8 +2156,6 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), - "Datatype insertion succeeded"); dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -2098,12 +2173,12 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; - offset[0] = 0; - offset[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); + start[0] = 0; + start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); fflush(stdout); } @@ -2111,19 +2186,22 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), 
"HDcalloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC); for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); - data[i].field4 = (double) GEN_DATA(i); } /* Create property list for collective dataset write */ @@ -2139,7 +2217,25 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) "Dataset write succeeded"); } H5E_END_TRY; - if (data) free(data); + if (data) HDfree(data); + + /* Verify that no data was written */ + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2157,29 +2253,30 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) * chunks using a compound datatype which requires * a datatype conversion. * - * This test currently should fail because the datatype - * conversion causes the parallel library to break - * to independent I/O and this isn't allowed when + * NOTE: This test currently should fail because the + * datatype conversion causes the parallel library to + * break to independent I/O and this isn't allowed when * there are filters in the pipeline. 
* * Programmer: Jordan Henderson * 02/10/2017 */ -/* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void test_write_cmpd_filtered_dataset_type_conversion_shared(void) { - cmpd_filtered_t *data = NULL; - hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t offset[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - size_t i; - hid_t file_id, dset_id, plist_id, filetype, memtype; - hid_t filespace, memspace; + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id, dset_id, plist_id, filetype, memtype; + hid_t filespace, memspace; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion"); @@ -2223,16 +2320,14 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ - memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_filtered_t)); + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); VRFY((memtype >= 0), "Datatype creation succeeded"); - VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_filtered_t, field1), H5T_NATIVE_SHORT) >= 0), - "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_filtered_t, field2), H5T_NATIVE_INT) >= 0), + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_filtered_t, field3), H5T_NATIVE_LONG) >= 0), + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(memtype, "DoubleData", HOFFSET(cmpd_filtered_t, field4), H5T_NATIVE_DOUBLE) >= 0), + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); /* Create the compound type for file. 
*/ @@ -2245,8 +2340,6 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - VRFY((H5Tinsert(filetype, "DoubleData", 24, H5T_IEEE_F64BE) >= 0), - "Datatype insertion succeeded"); dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -2264,12 +2357,12 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; - offset[0] = (hsize_t) mpi_rank; - offset[1] = 0; + start[0] = (hsize_t) mpi_rank; + start[1] = 0; if (VERBOSE_MED) { - printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], offset[ %llu, %llu ], block size[ %llu, %llu ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], offset[0], offset[1], block[0], block[1]); + printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); fflush(stdout); } @@ -2277,19 +2370,22 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) filespace = H5Dget_space(dset_id); VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), "Hyperslab selection succeeded"); - data = (COMPOUND_C_DATATYPE *) calloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); /* Fill data buffer */ - memset(data, 0, sizeof(cmpd_filtered_t) * (size_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC); for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { data[i].field1 = (short) GEN_DATA(i); data[i].field2 = (int) GEN_DATA(i); data[i].field3 = (long) GEN_DATA(i); - data[i].field4 = (double) GEN_DATA(i); } /* Create property list for collective dataset write */ @@ -2305,7 +2401,25 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) "Dataset write succeeded"); } H5E_END_TRY; - if (data) free(data); + if (data) HDfree(data); + + /* Verify that no data was written */ + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 
== memcmp(read_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2356,8 +2470,8 @@ test_read_one_chunk_filtered_dataset(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = ((C_DATATYPE) i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) @@ -2470,24 +2584,24 @@ test_read_one_chunk_filtered_dataset(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) recvcounts[i] = (int) flat_dims[0]; - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) displs[i] = (int) (i * flat_dims[0]); @@ -2498,11 +2612,11 @@ test_read_one_chunk_filtered_dataset(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2551,8 +2665,8 @@ test_read_filtered_dataset_no_overlap(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = (C_DATATYPE) ( @@ -2667,24 +2781,24 @@ 
test_read_filtered_dataset_no_overlap(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) recvcounts[i] = (int) flat_dims[0]; - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) displs[i] = (int) (i * flat_dims[0]); @@ -2695,11 +2809,11 @@ test_read_filtered_dataset_no_overlap(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2749,8 +2863,8 @@ test_read_filtered_dataset_overlap(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = (C_DATATYPE) ( @@ -2866,14 +2980,14 @@ test_read_filtered_dataset_overlap(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv @@ -2885,11 +2999,11 @@ test_read_filtered_dataset_overlap(void) size_t loop_count = count[0]; size_t total_recvcounts = 0; - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size 
* sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) { recvcounts[i] = (int) dataset_dims[1]; @@ -2909,11 +3023,11 @@ test_read_filtered_dataset_overlap(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2964,8 +3078,8 @@ test_read_filtered_dataset_single_no_selection(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = @@ -3092,25 +3206,25 @@ test_read_filtered_dataset_single_no_selection(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) recvcounts[i] = (int) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS); recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0; - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) displs[i] = (int) (i * (size_t) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS)); @@ -3125,11 +3239,11 @@ test_read_filtered_dataset_single_no_selection(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) 
HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3172,8 +3286,8 @@ test_read_filtered_dataset_all_no_selection(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); if (MAINPROCESS) { puts("Testing read from filtered chunks with all processes having no selection"); @@ -3258,14 +3372,14 @@ test_read_filtered_dataset_all_no_selection(void) read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3313,8 +3427,8 @@ test_read_filtered_dataset_point_selection(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = (C_DATATYPE) ( @@ -3400,8 +3514,8 @@ test_read_filtered_dataset_point_selection(void) VRFY((filespace >= 0), "File dataspace retrieval succeeded"); num_points = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size; - coords = (hsize_t *) calloc(1, 2 * num_points * sizeof(*coords)); - VRFY((NULL != coords), "Coords calloc succeeded"); + coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords)); + VRFY((NULL != coords), "Coords HDcalloc succeeded"); for (i = 0; i < num_points; i++) for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) @@ -3421,14 +3535,14 @@ test_read_filtered_dataset_point_selection(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv @@ -3441,11 +3555,11 @@ test_read_filtered_dataset_point_selection(void) size_t cur_loop_count 
= original_loop_count; size_t total_recvcounts = 0; - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) { recvcounts[i] = (int) dataset_dims[1]; @@ -3465,11 +3579,11 @@ test_read_filtered_dataset_point_selection(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3483,7 +3597,10 @@ test_read_filtered_dataset_point_selection(void) /* * Tests parallel read of filtered data in the case where * each process reads an equal amount of data from each - * chunk in the dataset. + * chunk in the dataset. Each chunk is distributed among the + * processes in round-robin fashion by blocks of size 1 until + * the whole chunk is selected, leading to an interleaved + * read pattern. * * The MAINPROCESS rank will first write out all of the * data to the dataset. Then, each rank will read part @@ -3519,8 +3636,8 @@ test_read_filtered_dataset_interleaved_read(void) /* Setup the buffer for writing and for comparison */ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) /* Add Column Index */ @@ -3642,14 +3759,14 @@ test_read_filtered_dataset_interleaved_read(void) read_buf_size = flat_dims[0] * sizeof(*read_buf); - read_buf = (C_DATATYPE *) calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded"); - global_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv @@ -3661,11 +3778,11 @@ test_read_filtered_dataset_interleaved_read(void) size_t loop_count = count[0]; size_t total_recvcounts = 0; - recvcounts = (int *) calloc(1, (size_t) mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); - displs = (int *) calloc(1, (size_t) mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + 
displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); for (i = 0; i < (size_t) mpi_size; i++) { recvcounts[i] = (int) dataset_dims[1]; @@ -3685,11 +3802,11 @@ test_read_filtered_dataset_interleaved_read(void) VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (displs) free(displs); - if (recvcounts) free(recvcounts); - if (global_buf) free(global_buf); - if (read_buf) free(read_buf); - if (correct_buf) free(correct_buf); + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3700,24 +3817,655 @@ test_read_filtered_dataset_interleaved_read(void) return; } +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * its own "page" in the 3rd dimension. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads its own "page" + * of the dataset and contributes its piece to a global buffer + * that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/16/2018 + */ static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void) { + MPI_Datatype vector_type; + MPI_Datatype resized_vector_type; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + + dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH; -} + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); -static void -test_read_3d_filtered_dataset_no_overlap_same_pages(void) -{ + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); -} + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size)); -static void -test_read_3d_filtered_dataset_overlap(void) -{ + if (MAINPROCESS) { + puts("Testing read from unshared filtered chunks on separate pages in 3D dataset"); -} + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + 
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + sel_dims[2] = 1; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + count[2] = 1; + stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = 0; + start[1] = 0; + start[2] = (hsize_t) mpi_rank; + + if (VERBOSE_MED) { + printf("Process %d is reading with 
count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* + * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each + * rank to write to the nth position of the global data buffer, where n is the rank number. + */ + VRFY((MPI_SUCCESS == MPI_Type_vector((int) flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)), + "MPI_Type_vector succeeded"); + VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded"); + + /* + * Resize the type to allow interleaving, + * so make it only one MPI_LONG wide + */ + VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)), + "MPI_Type_create_resized"); + VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded"); + + VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)), + "MPI_Allgather succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded"); + VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded"); + + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * each "page" in the 3rd dimension. However, no chunk on a + * given "page" is read from by more than one process. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each "page" of the dataset and contributes its piece to a + * global buffer that is checked for consistency. 
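+ *
+ * As an illustrative sketch (assuming 2 ranks, with CH_NROWS
+ * abbreviating the READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS
+ * macro): rank 0 reads the top half of the rows and rank 1 the bottom
+ * half of every page. Because each rank's selection starts at row
+ * (mpi_rank * CH_NROWS) and each chunk is CH_NROWS rows tall, no chunk
+ * is read by more than one rank.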
+ * + * Programmer: Jordan Henderson + * 05/16/2018 + */ +static void +test_read_3d_filtered_dataset_no_overlap_same_pages(void) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id, dset_id, plist_id; + hid_t filespace, memspace; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS; + dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE) ( + (i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1])) + ); + + if (MAINPROCESS) { + puts("Testing read from unshared filtered chunks on the same pages in 3D dataset"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with 
parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + sel_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + count[2] = (hsize_t) mpi_size; + stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); + start[1] = 0; + start[2] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) flat_dims[0]; + + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + 
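+    /*
+     * Rank-ordered reassembly: every rank contributes exactly
+     * flat_dims[0] elements, so rank i's piece begins at element
+     * i * flat_dims[0] of the global buffer.
+     */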
+ for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * each "page" in the 3rd dimension. Further, each chunk in + * each "page" is read from equally by all processes. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads part of each + * chunk of each "page" and contributes its pieces to a + * global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/16/2018 + */ +static void +test_read_3d_filtered_dataset_overlap(void) +{ + MPI_Datatype vector_type; + MPI_Datatype resized_vector_type; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1; + hid_t filespace = -1, memspace = -1; + + dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS; + dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS; + dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add the Column Index */ + correct_buf[i] = + (C_DATATYPE) ( + (i % (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the Row Index */ + + ((i % (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + / (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */ + + ((hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) + * (i / (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))) + ); + + if (MAINPROCESS) { + puts("Testing read from shared filtered 
chunks in 3D dataset"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); + sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS; + sel_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS); + count[1] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS); + count[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; + stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + stride[2] = 1; 
+    block[0] = 1;
+    block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+    block[2] = 1;
+    start[0] = (hsize_t) mpi_rank;
+    start[1] = 0;
+    start[2] = 0;
+
+    if (VERBOSE_MED) {
+        printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+               mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+        fflush(stdout);
+    }
+
+    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+         "Hyperslab selection succeeded");
+
+    /* Create property list for collective dataset read */
+    plist_id = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+    VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+         "Set DXPL MPIO succeeded");
+
+    read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+    read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+    VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+         "Dataset read succeeded");
+
+    global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+    VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+    {
+        size_t run_length = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
+        size_t num_blocks = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+
+        /*
+         * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
+         * rank to write to the nth position of the global data buffer, where n is the rank number.
+         */
+        VRFY((MPI_SUCCESS == MPI_Type_vector((int) num_blocks, (int) run_length, (int) (mpi_size * (int) run_length), C_DATATYPE_MPI, &vector_type)),
+             "MPI_Type_vector succeeded");
+        VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+        /*
+         * Resize the type to allow interleaving,
+         * so make it "run_length" MPI_LONGs wide
+         */
+        VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint) (run_length * sizeof(long)), &resized_vector_type)),
+             "MPI_Type_create_resized");
+        VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+    }
+
+    VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
+         "MPI_Allgather succeeded");
+
+    VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+         "Data verification succeeded");
+
+    VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+    VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+    if (global_buf) HDfree(global_buf);
+    if (read_buf) HDfree(read_buf);
+    if (correct_buf) HDfree(correct_buf);
+
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+    VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+    VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+    VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+    return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global
+ * buffer that is checked for consistency.
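+ *
+ * No datatype conversion is involved: the dataset is created with the
+ * same compound type (short, int and long members) that describes the
+ * read buffer in memory, so each COMPOUND_C_DATATYPE record is copied
+ * straight through.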
+ * + * Programmer: Jordan Henderson + * 05/16/2018 + */ static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void) { @@ -3811,8 +4559,8 @@ test_write_serial_read_parallel(void) data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -3820,7 +4568,7 @@ test_write_serial_read_parallel(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -3828,11 +4576,11 @@ test_write_serial_read_parallel(void) correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = (long) i; @@ -3867,8 +4615,8 @@ test_write_serial_read_parallel(void) VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - if (correct_buf) free(correct_buf); - if (read_buf) free(read_buf); + if (correct_buf) HDfree(correct_buf); + if (read_buf) HDfree(read_buf); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); @@ -3987,8 +4735,8 @@ test_write_parallel_read_serial(void) /* Fill data buffer */ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); - data = (C_DATATYPE *) calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data = (C_DATATYPE *) HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); for (i = 0; i < data_size / sizeof(*data); i++) data[i] = (C_DATATYPE) GEN_DATA(i); @@ -4003,7 +4751,7 @@ test_write_parallel_read_serial(void) VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded"); - if (data) free(data); + if (data) HDfree(data); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -4028,11 +4776,11 @@ test_write_parallel_read_serial(void) correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - correct_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); - read_buf = (C_DATATYPE *) calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) correct_buf[i] = (C_DATATYPE) ( diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index 43d471a..df989e5 100644 --- 
a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -155,7 +155,6 @@ typedef struct { short field1; int field2; long field3; - double field4; } COMPOUND_C_DATATYPE; /* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */ @@ -251,6 +250,33 @@ typedef struct { #define INTERLEAVED_READ_FILTERED_DATASET_NROWS (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR) #define INTERLEAVED_READ_FILTERED_DATASET_NCOLS (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR) +/* Defines for the 3D unshared filtered dataset separate page read test */ +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_read" +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3 +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size) + +/* Defines for the 3D unshared filtered dataset same page read test */ +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_read" +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3 +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size) + +/* Defines for the 3d shared filtered dataset read test */ +#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_read" +#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3 +#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size) +#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_SHARED_FILTERED_CHUNKS_3D_NROWS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR) +#define READ_SHARED_FILTERED_CHUNKS_3D_NCOLS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) +#define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) + /* Defines for the write file serially/read in parallel test */ #define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel" #define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3 -- cgit v0.12 From 6e37dff71f3305ef664b4bfd1e81c1b78b7692d0 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Thu, 17 May 2018 11:07:23 -0500 Subject: Fix bug in parallel reads of compressed data Add remaining parallel compound dataset partial read tests --- src/H5Dmpio.c | 41 +- testpar/t_filters_parallel.c | 895 ++++++++++++++++++++++++++++++++++++++++++- testpar/t_filters_parallel.h | 50 ++- 3 files changed, 962 insertions(+), 24 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 107b751..5e33384 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -2981,6 +2981,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk { H5D_chunk_info_t *chunk_info = NULL; 
     H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */
+    H5S_sel_iter_t *file_iter = NULL; /* File iterator for gathering the selected elements out of the chunk buffer on reads */
     unsigned char *mod_data = NULL; /* Chunk modification data sent by a process to a chunk's owner */
     H5Z_EDC_t err_detect; /* Error detection info */
     H5Z_cb_t filter_cb; /* I/O filter callback function */
@@ -2989,11 +2990,13 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
     hssize_t extent_npoints;
     hsize_t true_chunk_size;
     hbool_t mem_iter_init = FALSE;
+    hbool_t file_iter_init = FALSE;
     size_t buf_size;
     size_t i;
     H5S_t *dataspace = NULL; /* Other process' dataspace for the chunk */
-    void *tmp_gath_buf = NULL; /* Temporary gather buffer for owner of the chunk to gather into from
-                                  application write buffer before scattering out to the chunk data buffer */
+    void *tmp_gath_buf = NULL; /* Temporary gather buffer to gather into from application buffer
+                                  before scattering out to the chunk data buffer (when writing data),
+                                  or vice versa (when reading data) */
     int mpi_code;
     herr_t ret_value = SUCCEED;
@@ -3073,9 +3076,6 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
         HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information")
     mem_iter_init = TRUE;

-    if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
-
     /* If this is a read operation, scatter the read chunk data to the user's buffer.
      *
      * If this is a write operation, update the chunk data buffer with the modifications
      * from each process, then re-filter the chunk.
      */
     switch (io_info->op_type) {
         case H5D_IO_OP_READ:
-            if (H5D__scatter_mem(chunk_entry->buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0)
-                HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
+            if (NULL == (file_iter = (H5S_sel_iter_t *) H5MM_malloc(sizeof(H5S_sel_iter_t))))
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file iterator")
+
+            if (H5S_select_iter_init(file_iter, chunk_info->fspace, type_info->src_type_size) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information")
+            file_iter_init = TRUE;
+
+            if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace)) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
+            if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size)))
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
+
+            if (!H5D__gather_mem(chunk_entry->buf, chunk_info->fspace, file_iter, (size_t) iter_nelmts, tmp_gath_buf))
+                HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't gather from chunk buffer")
+
+            if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
+            if (H5D__scatter_mem(tmp_gath_buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
+
             break;

         case H5D_IO_OP_WRITE:
+            if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+
             if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size)))
                 HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
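/*
 * Reads gather, then scatter: chunk_entry->buf holds the entire
 * decompressed chunk, so the selected elements are first gathered
 * through the chunk's file-space iterator into tmp_gath_buf and then
 * scattered through the memory-space iterator into the application's
 * read buffer, which keeps partial chunk reads consistent with the
 * file-space selection.
 */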
@@ -3185,6 +3208,10 @@ done: HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") if (mem_iter) H5MM_free(mem_iter); + if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") + if (file_iter) + H5MM_free(file_iter); if (dataspace) if (H5S_close(dataspace) < 0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace") diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 886b544..2c639ae 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -1887,7 +1887,6 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) * Programmer: Jordan Henderson * 02/10/2017 */ -/* JTH: This test currently cannot be data-verified due to the floating-point data involved */ static void test_write_cmpd_filtered_dataset_no_conversion_shared(void) { @@ -4464,30 +4463,906 @@ test_read_3d_filtered_dataset_overlap(void) * buffer that is checked for consistency. * * Programmer: Jordan Henderson - * 05/16/2018 + * 05/17/2018 */ static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void) { + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + } + + /* Create the compound type for memory. 
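Its three members mirror the short, int and long fields of COMPOUND_C_DATATYPE, each inserted at the HOFFSET of the corresponding field so the HDF5 type stays in sync with the C struct layout.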
*/ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation 
succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + start[0] = 0; + start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf)); + + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + return; } +/* + * Tests parallel read of filtered data from shared + * chunks using a compound datatype which doesn't + * require a datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. 
Then, each rank reads a part of + * each chunk of the dataset and contributes its piece + * to a global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/17/2018 + */ static void test_read_cmpd_filtered_dataset_no_conversion_shared(void) { + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id, dset_id, plist_id, memtype; + hid_t filespace, memspace; + int *recvcounts = NULL; + int *displs = NULL; -} + dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; -static void -test_read_cmpd_filtered_dataset_type_conversion_unshared(void) -{ + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); -} + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); -static void -test_read_cmpd_filtered_dataset_type_conversion_shared(void) -{ + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + } + + /* Create the compound type for memory. 
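Because the dataset is created with this same type, the file and memory layouts are identical and the read involves no conversion.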
*/ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory 
dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + start[0] = (hsize_t) mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf)); + + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data from unshared + * chunks using a compound datatype which requires a + * datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. 
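+ * Because the dataset's file datatype is big-endian while the read uses the
+ * native memory type, the library must convert every element during the read.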
Then, each rank reads a part of + * the dataset and contributes its piece to a global + * buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_type_conversion_unshared(void) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1; + hid_t filespace = -1, memspace = -1; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (i % dataset_dims[1]) + + (i / dataset_dims[1]) + ); + } + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + /* Create the compound type for file. 
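All three fields are stored as 8-byte big-endian integers (H5T_STD_I64BE) at offsets 0, 8, and 16 within a 32-byte compound, so reading into the native memory type requires a conversion for each field.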
*/ + filetype = H5Tcreate(H5T_COMPOUND, 32); + VRFY((filetype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File 
dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + start[0] = 0; + start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); + + if (VERBOSE_MED) { + printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Create property list for collective dataset read */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), + "Set DXPL MPIO succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf)); + + displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t) mpi_size; i++) + displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), + "Data verification succeeded"); + + if (displs) HDfree(displs); + if (recvcounts) HDfree(recvcounts); + if (global_buf) HDfree(global_buf); + if (read_buf) HDfree(read_buf); + if (correct_buf) HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data from shared + * chunks using a compound datatype which requires + * a datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. 
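+ * As in the unshared variant, the big-endian file datatype forces a
+ * datatype conversion during the collective read.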
Then, each rank reads a part of + * each chunk of the dataset and contributes its pieces + * to a global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_type_conversion_shared(void) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id, dset_id, plist_id, filetype, memtype; + hid_t filespace, memspace; + int *recvcounts = NULL; + int *displs = NULL; + + dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field2 = (int) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + + correct_buf[i].field3 = (long) ( + (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]) + ); + } + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + /* Create the compound type for file. 
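The layout mirrors the unshared conversion test: three 8-byte big-endian integers at offsets 0, 8, and 16.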
*/ + filetype = H5Tcreate(H5T_COMPOUND, 32); + VRFY((filetype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + puts("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion"); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((plist_id >= 0), "DCPL creation succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + + dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + /* Set up file access property list with parallel I/O access */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), + "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size; + sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + 
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
}

/*
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index df989e5..4844abd 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -66,6 +66,13 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
 #define DIM0_SCALE_FACTOR 4
 #define DIM1_SCALE_FACTOR 2
+/* Struct type for the compound datatype filtered dataset tests */
+typedef struct {
+ short field1;
+ int field2; + long field3; +} COMPOUND_C_DATATYPE; + /* Defines for the one-chunk filtered dataset write test */ #define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write" #define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2 @@ -150,13 +157,6 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES; #define WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) #define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) -/* Struct type for the compound datatype filtered dataset tests */ -typedef struct { - short field1; - int field2; - long field3; -} COMPOUND_C_DATATYPE; - /* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */ #define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_write" #define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2 @@ -277,6 +277,42 @@ typedef struct { #define READ_SHARED_FILTERED_CHUNKS_3D_NCOLS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) #define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) +/* Defines for the compound datatype filtered dataset no conversion read test with unshared chunks */ +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_read" +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size) + +/* Defines for the compound datatype filtered dataset no conversion read test with shared chunks */ +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_read" +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS + +/* Defines for the compound datatype filtered dataset type conversion read test with unshared chunks */ +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_read" +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size) + +/* Defines for the compound datatype filtered dataset type conversion read test with shared chunks */ +#define 
READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_read" +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1 +#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS + /* Defines for the write file serially/read in parallel test */ #define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel" #define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3 -- cgit v0.12 From eb53d09bb28618a95fae68e42f22b99a477f9625 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Thu, 17 May 2018 11:14:52 -0500 Subject: Remove unused local variable Ensure frees are done in a more heap-fragmentation friendly order --- src/H5Dmpio.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 5e33384..6f6d409 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -2982,7 +2982,6 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk H5D_chunk_info_t *chunk_info = NULL; H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */ H5S_sel_iter_t *file_iter = NULL; - unsigned char *mod_data = NULL; /* Chunk modification data sent by a process to a chunk's owner */ H5Z_EDC_t err_detect; /* Error detection info */ H5Z_cb_t filter_cb; /* I/O filter callback function */ unsigned filter_mask = 0; @@ -3200,18 +3199,16 @@ done: H5MM_free(chunk_entry->async_info.receive_buffer_array); if (chunk_entry->async_info.receive_requests_array) H5MM_free(chunk_entry->async_info.receive_requests_array); - if (mod_data) - H5MM_free(mod_data); if (tmp_gath_buf) H5MM_free(tmp_gath_buf); - if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") - if (mem_iter) - H5MM_free(mem_iter); if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") if (file_iter) H5MM_free(file_iter); + if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") + if (mem_iter) + H5MM_free(mem_iter); if (dataspace) if (H5S_close(dataspace) < 0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace") -- cgit v0.12 From ee1ed5c0640a95808d65caf8e3f85ee7afcc21f6 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Thu, 17 May 2018 11:17:05 -0500 Subject: Eliminate warning about signed to unsigned conversion --- src/H5Dmpio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 6f6d409..887a05f 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -3096,13 +3096,13 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer") - if (!H5D__gather_mem(chunk_entry->buf, chunk_info->fspace, file_iter, iter_nelmts, tmp_gath_buf)) + if 
(!H5D__gather_mem(chunk_entry->buf, chunk_info->fspace, file_iter, (size_t) iter_nelmts, tmp_gath_buf)) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't gather from chunk buffer") if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid") - if (H5D__scatter_mem(tmp_gath_buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0) + if (H5D__scatter_mem(tmp_gath_buf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, io_info->u.rbuf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer") break; @@ -3115,7 +3115,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer") /* Gather modification data from the application write buffer into a temporary buffer */ - if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, tmp_gath_buf)) + if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, tmp_gath_buf)) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't gather from write buffer") if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) @@ -3133,7 +3133,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk /* Scatter the owner's modification data into the chunk data buffer according to * the file space. */ - if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0) + if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t) iter_nelmts, chunk_entry->buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to chunk data buffer") if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) -- cgit v0.12 From 12128a478218652607e8319e3a3f4b03a4b0b65f Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Fri, 1 Jun 2018 11:27:00 -0500 Subject: Fix daily test failure. 
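Call the versioned H5Ovisit2() and H5Ovisit_by_name2() functions directly from the JNI wrappers instead of the unversioned H5Ovisit() and H5Ovisit_by_name() names.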
--- java/src/jni/h5oImp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/java/src/jni/h5oImp.c b/java/src/jni/h5oImp.c index d9276da..7665c70 100644 --- a/java/src/jni/h5oImp.c +++ b/java/src/jni/h5oImp.c @@ -442,7 +442,7 @@ Java_hdf_hdf5lib_H5_H5Ovisit h5nullArgument(env, "H5Ovisit: callback_op is NULL"); } /* end if */ else { - status = H5Ovisit((hid_t)grp_id, (H5_index_t)idx_type, (H5_iter_order_t)order, (H5O_iterate_t)H5O_iterate_cb, (void*)op_data, (unsigned)fields); + status = H5Ovisit2((hid_t)grp_id, (H5_index_t)idx_type, (H5_iter_order_t)order, (H5O_iterate_t)H5O_iterate_cb, (void*)op_data, (unsigned)fields); if (status < 0) h5libraryError(env); @@ -478,7 +478,7 @@ Java_hdf_hdf5lib_H5_H5Ovisit_1by_1name else { PIN_JAVA_STRING(name, lName); if (lName != NULL) { - status = H5Ovisit_by_name((hid_t)grp_id, lName, (H5_index_t)idx_type, (H5_iter_order_t)order, (H5O_iterate_t)H5O_iterate_cb, (void*)op_data, (unsigned)fields, (hid_t)access_id); + status = H5Ovisit_by_name2((hid_t)grp_id, lName, (H5_index_t)idx_type, (H5_iter_order_t)order, (H5O_iterate_t)H5O_iterate_cb, (void*)op_data, (unsigned)fields, (hid_t)access_id); UNPIN_JAVA_STRING(name, lName); -- cgit v0.12 From e2c9f5a27a0fa4c908ff95cce8fb43282acd5df7 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Mon, 4 Jun 2018 10:53:08 -0500 Subject: Changes to test with checksum filter as well as deflate filter --- testpar/t_filters_parallel.c | 121 ++++++++++++++++++++++++++++++++----------- testpar/t_filters_parallel.h | 6 +-- 2 files changed, 92 insertions(+), 35 deletions(-) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 2c639ae..fe134e0 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -31,8 +31,12 @@ char filenames[1][256]; int nerrors = 0; +size_t cur_filter_idx = 0; + #define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) +static herr_t set_dcpl_filter(hid_t dcpl); + /* Tests for writing data in parallel */ static void test_write_one_chunk_filtered_dataset(void); static void test_write_filtered_dataset_no_overlap(void); @@ -116,6 +120,26 @@ static void (*tests[])(void) = { }; /* + * Function to call the appropriate HDF5 filter-setting function + * depending on the currently set index. Used to re-run the tests + * with different filters to check that the data still comes back + * correctly under a variety of circumstances, such as the + * Fletcher32 checksum filter increasing the size of the chunk. + */ +static herr_t +set_dcpl_filter(hid_t dcpl) +{ + switch (cur_filter_idx) { + case 0: + return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL); + case 1: + return H5Pset_fletcher32(dcpl); + default: + return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL); + } +} + +/* * Tests parallel write of filtered data in the special * case where a dataset is composed of a single chunk. 
* @@ -177,7 +201,7 @@ test_write_one_chunk_filtered_dataset(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -333,7 +357,7 @@ test_write_filtered_dataset_no_overlap(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -492,7 +516,7 @@ test_write_filtered_dataset_overlap(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -657,7 +681,7 @@ test_write_filtered_dataset_single_no_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -822,7 +846,7 @@ test_write_filtered_dataset_all_no_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -949,7 +973,7 @@ test_write_filtered_dataset_point_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -1102,7 +1126,7 @@ test_write_filtered_dataset_interleaved_write(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -1268,7 +1292,7 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -1429,7 +1453,7 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -1593,7 +1617,7 @@ test_write_3d_filtered_dataset_overlap(void) "Chunk size set"); /* Add test filter 
to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -1762,7 +1786,7 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); @@ -1943,7 +1967,7 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); @@ -2132,7 +2156,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); @@ -2316,7 +2340,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); /* Create the compound type for memory. */ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); @@ -2505,7 +2529,7 @@ test_read_one_chunk_filtered_dataset(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -2702,7 +2726,7 @@ test_read_filtered_dataset_no_overlap(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -2901,7 +2925,7 @@ test_read_filtered_dataset_overlap(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -3121,7 +3145,7 @@ test_read_filtered_dataset_single_no_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -3317,7 +3341,7 @@ test_read_filtered_dataset_all_no_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, 
plist_id, H5P_DEFAULT); @@ -3465,7 +3489,7 @@ test_read_filtered_dataset_point_selection(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -3680,7 +3704,7 @@ test_read_filtered_dataset_interleaved_read(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -3892,7 +3916,7 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -4102,7 +4126,7 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -4317,7 +4341,7 @@ test_read_3d_filtered_dataset_overlap(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -4551,7 +4575,7 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -4775,7 +4799,7 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -5007,7 +5031,7 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -5243,7 +5267,7 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace, H5P_DEFAULT, plist_id, 
H5P_DEFAULT); @@ -5423,7 +5447,7 @@ test_write_serial_read_parallel(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -5569,7 +5593,7 @@ test_write_parallel_read_serial(void) "Chunk size set"); /* Add test filter to the pipeline */ - VRFY((SET_FILTER(plist_id) >= 0), "Filter set"); + VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set"); dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); @@ -5742,6 +5766,43 @@ main(int argc, char** argv) } } + /* + * Increment the filter index to switch to the checksum filter + * and re-run the tests. + */ + cur_filter_idx++; + + h5_clean_files(FILENAME, fapl); + + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded"); + + VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((file_id >= 0), "Test file creation succeeded"); + + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + if (MAINPROCESS) { + printf("\n=================================================================\n"); + printf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n"); + printf("=================================================================\n\n"); + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) MESG("MPI_Barrier failed"); + nerrors++; + } + } + if (nerrors) goto exit; if (MAINPROCESS) puts("All Parallel Filters tests passed\n"); diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index 4844abd..797d12d 100644 --- a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -57,11 +57,7 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES; #define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */ #define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */ -#ifdef DYNAMIC_FILTER -#define SET_FILTER(dcpl) H5Pset_filter(dcpl, filter_id, flags, FILTER_NUM_CDVALUES, cd_values) /* Test other filter in parallel */ -#else -#define SET_FILTER(dcpl) H5Pset_deflate(dcpl, 6) /* Test GZIP filter in parallel */ -#endif +#define DEFAULT_DEFLATE_LEVEL 6 #define DIM0_SCALE_FACTOR 4 #define DIM1_SCALE_FACTOR 2 -- cgit v0.12 From 9fe86bc51c6016ec82977e0125cbd9ac1a1cf23b Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Mon, 4 Jun 2018 13:49:42 -0500 Subject: Add test to continually grow and shrink chunks --- testpar/t_filters_parallel.c | 140 +++++++++++++++++++++++++++++++++++++++++++ testpar/t_filters_parallel.h | 9 +++ 2 files changed, 149 insertions(+) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index fe134e0..3647732 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -69,6 +69,9 @@ static void test_read_cmpd_filtered_dataset_no_conversion_shared(void); static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void); static void test_read_cmpd_filtered_dataset_type_conversion_shared(void); +/* Other 
miscellaneous tests */
+static void test_shrinking_growing_chunks(void);
+
 /*
 * Tests for attempting to round-trip the data going from
 *
@@ -117,6 +120,7 @@ static void (*tests[])(void) = {
 test_read_cmpd_filtered_dataset_type_conversion_shared,
 test_write_serial_read_parallel,
 test_write_parallel_read_serial,
+ test_shrinking_growing_chunks,
 };

 /*
@@ -5700,6 +5704,142 @@ test_write_parallel_read_serial(void)
 return;
 }
+/*
+ * Tests that causing chunks to continually grow and shrink
+ * by writing random data followed by zeroed-out data (and
+ * thus controlling the compression ratio) does not cause
+ * problems.
+ *
+ * Programmer: Jordan Henderson
+ * 06/04/2018
+ */
+static void
+test_shrinking_growing_chunks(void)
+{
+ double *data = NULL;
+ hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) puts("Testing continually shrinking/growing chunks");
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /*
+ * Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (double *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+ /* Continually write random data, followed by zeroed-out data */
+ if ((i % 2))
+ HDmemset(data, 0, data_size);
+ else {
+ size_t j;
+ for (j = 0; j < data_size / sizeof(*data); j++) {
+ data[j] = (double) ( rand() / (double) (RAND_MAX / (double) 1.0L) );
+ }
+ }
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+ }
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
 int
 main(int argc, char** argv)
 {
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index 797d12d..9543508 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -327,4 +327,13 @@ typedef struct {
 #define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / mpi_size)
 #define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / mpi_size)
+/* Defines for the shrinking/growing chunks test */
+#define SHRINKING_GROWING_CHUNKS_DATASET_NAME "shrink_grow_chunks_test"
+#define SHRINKING_GROWING_CHUNKS_DATASET_DIMS 2
+#define SHRINKING_GROWING_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+
 #endif /* TEST_PARALLEL_FILTERS_H_ */
-- cgit v0.12
From 8da8a1dcb5d1f51654018527fbc3fc9baeff130e Mon Sep 17 00:00:00 2001
From: Jordan Henderson
Date: Wed, 6 Jun 2018 13:29:03 -0500
Subject: Revise H5D__mpio_array_gatherv() to not allocate memory needlessly

---
 src/H5Dmpio.c | 99 ++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 61 insertions(+), 38 deletions(-)

diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 887a05f..81aa799 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -234,7
+234,7 @@ static herr_t H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info H5D_filtered_collective_io_info_t *local_chunk_array, size_t *local_chunk_array_num_entries); static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, size_t array_entry_size, void **gathered_array, size_t *gathered_array_num_entries, - int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)); + hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)); static herr_t H5D__mpio_filtered_collective_write_type( H5D_filtered_collective_io_info_t *chunk_list, size_t num_entries, MPI_Datatype *new_mem_type, hbool_t *mem_type_derived, @@ -418,19 +418,16 @@ done: * Function: H5D__mpio_array_gatherv * * Purpose: Given an array, specified in local_array, by each processor - * calling this function, gathers each array into a single + * calling this function, collects each array into a single * array which is then either gathered to the processor * specified by root, when allgather is false, or is * distributed back to all processors when allgather is true. * - * The size of each entry and number of entries in the array - * contributed by an individual processor should be specified - * in array_entry_size and local_array_num_entries, + * The number of entries in the array contributed by an + * individual processor and the size of each entry should be + * specified in local_array_num_entries and array_entry_size, * respectively. * - * The number of processors participating in the gather - * operation should be specified for nprocs. - * * The MPI communicator to use should be specified for comm. * * If the sort_func argument is supplied, the array is sorted @@ -448,14 +445,13 @@ done: static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, size_t array_entry_size, void **_gathered_array, size_t *_gathered_array_num_entries, - int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)) + hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)) { size_t gathered_array_num_entries = 0; /* The size of the newly-constructed array */ - size_t i; void *gathered_array = NULL; /* The newly-constructed array returned to the caller */ - int *receive_counts_array = NULL; /* Array containing number of entries each process is contributing */ - int *displacements_array = NULL; /* Array of displacements where each process places its data in the final array */ - int mpi_code; + int *receive_counts_array = NULL; /* Array containing number of entries each processor is contributing */ + int *displacements_array = NULL; /* Array of displacements where each processor places its data in the final array */ + int mpi_code, mpi_rank, mpi_size; int sendcount; herr_t ret_value = SUCCEED; @@ -464,34 +460,62 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, HDassert(_gathered_array); HDassert(_gathered_array_num_entries); - /* Determine the size of the end result array */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* + * Determine the size of the end result array by collecting the number + * of entries contributed by each processor into a single total. 
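+ * The reduction runs on every rank, even when gathering only to the root,
+ * so that all ranks agree on whether any entries were contributed at all.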
+ */ if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_array_num_entries, &gathered_array_num_entries, 1, MPI_INT, MPI_SUM, comm))) HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) - /* If 0 entries resulted from the collective operation, no one is writing anything */ + /* If 0 entries resulted from the collective operation, no processor is contributing anything and there is nothing to do */ if (gathered_array_num_entries > 0) { - if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array") + /* + * If gathering to all processors, all processors need to allocate space for the resulting array, as well as + * the receive counts and displacements arrays for the collective MPI_Allgatherv call. Otherwise, only the + * root processor needs to allocate the space for an MPI_Gatherv call. + */ + if (allgather || (mpi_rank == root)) { + if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array") - if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array") + if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array") - if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array") + if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array") + } /* end if */ - /* Inform each process of how many entries each other process is contributing to the resulting array */ - if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code) + /* + * If gathering to all processors, inform each processor of how many entries each other processor is + * contributing to the resulting array by collecting the counts into each processor's "receive counts" + * array. Otherwise, inform only the root processor of how many entries each other processor is contributing. + */ + if (allgather) { + if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code) + } /* end if */ + else { + if (MPI_SUCCESS != (mpi_code = MPI_Gather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, root, comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code) + } /* end else */ + + if (allgather || (mpi_rank == root)) { + size_t i; - /* Multiply each receive count by the size of the array entry, since the data is sent as bytes */ - for (i = 0; i < (size_t) nprocs; i++) - H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t); + /* Multiply each receive count by the size of the array entry, since the data is sent as bytes. 
*/ + for (i = 0; i < (size_t) mpi_size; i++) + H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t); - /* Set receive buffer offsets for MPI_Allgatherv */ - displacements_array[0] = 0; - for (i = 1; i < (size_t) nprocs; i++) - displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1]; + /* Set receive buffer offsets for the collective MPI_Allgatherv/MPI_Gatherv call. */ + displacements_array[0] = 0; + for (i = 1; i < (size_t) mpi_size; i++) + displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1]; + } /* end if */ + /* As the data is sent as bytes, calculate the true sendcount for the data. */ H5_CHECKED_ASSIGN(sendcount, int, local_array_num_entries * array_entry_size, size_t); if (allgather) { @@ -505,7 +529,8 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code) } /* end else */ - if (sort_func) HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func); + if (sort_func && (allgather || (mpi_rank == root))) + HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func); } /* end if */ *_gathered_array = gathered_array; @@ -1295,8 +1320,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in * of the chunks in the file. */ if (H5D__mpio_array_gatherv(chunk_list, chunk_list_num_entries, sizeof(H5D_filtered_collective_io_info_t), - (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size, - true, 0, io_info->comm, NULL) < 0) + (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes") /* Collectively re-allocate the modified chunks (from each process) in the file */ @@ -1768,8 +1792,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i * of the chunks in the file */ if (H5D__mpio_array_gatherv(&chunk_list[i], have_chunk_to_process ? 1 : 0, sizeof(H5D_filtered_collective_io_info_t), - (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size, - true, 0, io_info->comm, NULL) < 0) + (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes") /* Participate in the collective re-allocation of all chunks modified @@ -2655,8 +2678,8 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty * call, the gathered list will initially be sorted in increasing order of chunk offset in the file. 
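  * (Note: with the revised H5D__mpio_array_gatherv(), the sort_func
  * comparator is run only on the ranks that actually receive the
  * gathered array; since this call passes allgather = false with
  * root = 0, that is rank 0 alone, the rank that performs the
  * redistribution.)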
*/ if (H5D__mpio_array_gatherv(local_chunk_array, *local_chunk_array_num_entries, sizeof(H5D_filtered_collective_io_info_t), - (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, mpi_size, - false, 0, io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0) + (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, false, 0, + io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather array") /* Rank 0 redistributes any shared chunks to new owners as necessary */ -- cgit v0.12 From 5afeefe1bae62fa5faf06c0c6f240e10863a2693 Mon Sep 17 00:00:00 2001 From: Jerome Soumagne Date: Fri, 15 Jun 2018 16:47:32 -0500 Subject: Fix H5detect to use no_sanitize_address attribute and support GCC sanitizers --- src/H5detect.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/H5detect.c b/src/H5detect.c index 1c5554e..69e07fd 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -54,12 +54,8 @@ static const char *FileHeader = "\n\ #include "H5Tpublic.h" #include "H5Rpublic.h" -#if defined(__has_attribute) -#if __has_attribute(no_sanitize) -#define HDF_NO_UBSAN __attribute__((no_sanitize("undefined"))) -#else -#define HDF_NO_UBSAN -#endif +#if defined(__has_attribute) && __has_attribute(no_sanitize_address) +#define HDF_NO_UBSAN __attribute__((no_sanitize_address)) #else #define HDF_NO_UBSAN #endif @@ -1675,11 +1671,13 @@ detect_alignments(void) */ static int verify_signal_handlers(int signum, void (*handler)(int)) { -#if defined(__has_feature) +#if defined(__has_feature) /* Clang */ #if __has_feature(address_sanitizer) || __has_feature(thread_sanitizer) /* Under the address and thread sanitizers, don't raise any signals. 
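  * (__has_feature is a Clang extension; GCC instead predefines
  * __SANITIZE_ADDRESS__ and __SANITIZE_THREAD__ when the corresponding
  * sanitizer is enabled, which is what the new #elif branch below
  * tests.)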
 */
        return 0;
 #endif
+#elif defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__) /* GCC */
+    return 0;
 #endif
     void (*save_handler)(int) = HDsignal(signum, handler);
     volatile int i, val;
-- cgit v0.12


From 3c07f4738ff724738a295c5fc7d0379dc153ae7c Mon Sep 17 00:00:00 2001
From: Allen Byrne
Date: Fri, 29 Jun 2018 13:04:48 -0500
Subject: Add actual file names to diff

---
 java/test/junit.sh.in | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 71 insertions(+), 1 deletion(-)

diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in
index 3bb0e85..5d69bee 100644
--- a/java/test/junit.sh.in
+++ b/java/test/junit.sh.in
@@ -311,7 +311,7 @@ else
     echo "**FAILED**  JUnit-TestH5"
     echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
-    test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/    /'
+    test yes = "$verbose" && $DIFF JUnit-TestH5.txt JUnit-TestH5.out |sed 's/^/    /'
 fi

 echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Eparams"
@@ -330,7 +330,9 @@ if diff JUnit-TestH5Eparams.out JUnit-TestH5Eparams.txt > /dev/null; then
     echo "  PASSED      JUnit-TestH5Eparams"
 else
     echo "**FAILED**  JUnit-TestH5Eparams"
+    echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
+    test yes = "$verbose" && $DIFF JUnit-TestH5Eparams.txt JUnit-TestH5Eparams.out |sed 's/^/    /'
 fi

 echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Eregister"
@@ -349,7 +351,9 @@ if diff JUnit-TestH5Eregister.out JUnit-TestH5Eregister.txt > /dev/null; then
     echo "  PASSED      JUnit-TestH5Eregister"
 else
     echo "**FAILED**  JUnit-TestH5Eregister"
+    echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
+    test yes = "$verbose" && $DIFF JUnit-TestH5Eregister.txt JUnit-TestH5Eregister.out |sed 's/^/    /'
 fi

 echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fparams"
@@ -368,7 +372,9 @@ if diff JUnit-TestH5Fparams.out JUnit-TestH5Fparams.txt > /dev/null; then
     echo "  PASSED      JUnit-TestH5Fparams"
 else
     echo "**FAILED**  JUnit-TestH5Fparams"
+    echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
+    test yes = "$verbose" && $DIFF JUnit-TestH5Fparams.txt JUnit-TestH5Fparams.out |sed 's/^/    /'
 fi

 echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fbasic"
@@ -387,7 +393,9 @@ if diff JUnit-TestH5Fbasic.out JUnit-TestH5Fbasic.txt > /dev/null; then
     echo "  PASSED      JUnit-TestH5Fbasic"
 else
     echo "**FAILED**  JUnit-TestH5Fbasic"
+    echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
+    test yes = "$verbose" && $DIFF JUnit-TestH5Fbasic.txt JUnit-TestH5Fbasic.out |sed 's/^/    /'
 fi

 echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5F"
@@ -406,7 +414,9 @@ if diff JUnit-TestH5F.out JUnit-TestH5F.txt > /dev/null; then
     echo "  PASSED      JUnit-TestH5F"
 else
     echo "**FAILED**  JUnit-TestH5F"
+    echo "    Expected result differs from actual result"
     nerrors="`expr $nerrors + 1`"
+    test yes = "$verbose" && $DIFF JUnit-TestH5F.txt JUnit-TestH5F.out |sed 's/^/    /'
 fi
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Fswmr" @@ -425,7 +435,9 @@ if diff JUnit-TestH5Fswmr.out JUnit-TestH5Fswmr.txt > /dev/null; then echo " PASSED JUnit-TestH5Fswmr" else echo "**FAILED** JUnit-TestH5Fswmr" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Fswmr.txt JUnit-TestH5Fswmr.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Gbasic" @@ -444,7 +456,9 @@ if diff JUnit-TestH5Gbasic.out JUnit-TestH5Gbasic.txt > /dev/null; then echo " PASSED JUnit-TestH5Gbasic" else echo "**FAILED** JUnit-TestH5Gbasic" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Gbasic.txt JUnit-TestH5Gbasic.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5G" @@ -463,7 +477,9 @@ if diff JUnit-TestH5G.out JUnit-TestH5G.txt > /dev/null; then echo " PASSED JUnit-TestH5G" else echo "**FAILED** JUnit-TestH5G" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5G.txt JUnit-TestH5G.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Sbasic" @@ -482,7 +498,9 @@ if diff JUnit-TestH5Sbasic.out JUnit-TestH5Sbasic.txt > /dev/null; then echo " PASSED JUnit-TestH5Sbasic" else echo "**FAILED** JUnit-TestH5Sbasic" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Sbasic.txt JUnit-TestH5Sbasic.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5S" @@ -501,7 +519,9 @@ if diff JUnit-TestH5S.out JUnit-TestH5S.txt > /dev/null; then echo " PASSED JUnit-TestH5S" else echo "**FAILED** JUnit-TestH5S" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5S.txt JUnit-TestH5S.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Tparams" @@ -520,7 +540,9 @@ if diff JUnit-TestH5Tparams.out JUnit-TestH5Tparams.txt > /dev/null; then echo " PASSED JUnit-TestH5Tparams" else echo "**FAILED** JUnit-TestH5Tparams" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Tparams.txt JUnit-TestH5Tparams.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Tbasic" @@ -539,7 +561,9 @@ if diff JUnit-TestH5Tbasic.out JUnit-TestH5Tbasic.txt > /dev/null; then echo " PASSED JUnit-TestH5Tbasic" else echo "**FAILED** JUnit-TestH5Tbasic" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF 
JUnit-TestH5Tbasic.txt JUnit-TestH5Tbasic.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5T" @@ -558,7 +582,9 @@ if diff JUnit-TestH5T.out JUnit-TestH5T.txt > /dev/null; then echo " PASSED JUnit-TestH5T" else echo "**FAILED** JUnit-TestH5T" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5T.txt JUnit-TestH5T.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Dparams" @@ -577,7 +603,9 @@ if diff JUnit-TestH5Dparams.out JUnit-TestH5Dparams.txt > /dev/null; then echo " PASSED JUnit-TestH5Dparams" else echo "**FAILED** JUnit-TestH5Dparams" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Dparams.txt JUnit-TestH5Dparams.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5D" @@ -596,7 +624,9 @@ if diff JUnit-TestH5D.out JUnit-TestH5D.txt > /dev/null; then echo " PASSED JUnit-TestH5D" else echo "**FAILED** JUnit-TestH5D" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5D.txt JUnit-TestH5D.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Dplist" @@ -615,7 +645,9 @@ if diff JUnit-TestH5Dplist.out JUnit-TestH5Dplist.txt > /dev/null; then echo " PASSED JUnit-TestH5Dplist" else echo "**FAILED** JUnit-TestH5Dplist" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Dplist.txt JUnit-TestH5Dplist.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lparams" @@ -634,7 +666,9 @@ if diff JUnit-TestH5Lparams.out JUnit-TestH5Lparams.txt > /dev/null; then echo " PASSED JUnit-TestH5Lparams" else echo "**FAILED** JUnit-TestH5Lparams" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Lparams.txt JUnit-TestH5Lparams.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lbasic" @@ -653,7 +687,9 @@ if diff JUnit-TestH5Lbasic.out JUnit-TestH5Lbasic.txt > /dev/null; then echo " PASSED JUnit-TestH5Lbasic" else echo "**FAILED** JUnit-TestH5Lbasic" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Lbasic.txt JUnit-TestH5Lbasic.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Lcreate" @@ -672,7 +708,9 @@ if diff JUnit-TestH5Lcreate.out JUnit-TestH5Lcreate.txt > /dev/null; then echo " PASSED JUnit-TestH5Lcreate" else echo "**FAILED** JUnit-TestH5Lcreate" + echo " Expected result differs from actual result" 
nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Lcreate.txt JUnit-TestH5Lcreate.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5R" @@ -691,7 +729,9 @@ if diff JUnit-TestH5R.out JUnit-TestH5R.txt > /dev/null; then echo " PASSED JUnit-TestH5R" else echo "**FAILED** JUnit-TestH5R" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5R.txt JUnit-TestH5R.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5P" @@ -710,7 +750,9 @@ if diff JUnit-TestH5P.out JUnit-TestH5P.txt > /dev/null; then echo " PASSED JUnit-TestH5P" else echo "**FAILED** JUnit-TestH5P" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5P.txt JUnit-TestH5P.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5PData" @@ -729,7 +771,9 @@ if diff JUnit-TestH5PData.out JUnit-TestH5PData.txt > /dev/null; then echo " PASSED JUnit-TestH5PData" else echo "**FAILED** JUnit-TestH5PData" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5PData.txt JUnit-TestH5PData.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapl" @@ -748,7 +792,9 @@ if diff JUnit-TestH5Pfapl.out JUnit-TestH5Pfapl.txt > /dev/null; then echo " PASSED JUnit-TestH5Pfapl" else echo "**FAILED** JUnit-TestH5Pfapl" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Pfapl.txt JUnit-TestH5Pfapl.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pvirtual" @@ -767,7 +813,9 @@ if diff JUnit-TestH5Pvirtual.out JUnit-TestH5Pvirtual.txt > /dev/null; then echo " PASSED JUnit-TestH5Pvirtual" else echo "**FAILED** JUnit-TestH5Pvirtual" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Pvirtual.txt JUnit-TestH5Pvirtual.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Plist" @@ -786,7 +834,9 @@ if diff JUnit-TestH5Plist.out JUnit-TestH5Plist.txt > /dev/null; then echo " PASSED JUnit-TestH5Plist" else echo "**FAILED** JUnit-TestH5Plist" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Plist.txt JUnit-TestH5Plist.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5A" @@ -805,7 +855,9 @@ if diff JUnit-TestH5A.out JUnit-TestH5A.txt > /dev/null; then echo " PASSED JUnit-TestH5A" else echo "**FAILED** JUnit-TestH5A" + echo " Expected result differs from 
actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5A.txt JUnit-TestH5A.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Oparams" @@ -824,7 +876,9 @@ if diff JUnit-TestH5Oparams.out JUnit-TestH5Oparams.txt > /dev/null; then echo " PASSED JUnit-TestH5Oparams" else echo "**FAILED** JUnit-TestH5Oparams" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Oparams.txt JUnit-TestH5Oparams.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Obasic" @@ -843,7 +897,9 @@ if diff JUnit-TestH5Obasic.out JUnit-TestH5Obasic.txt > /dev/null; then echo " PASSED JUnit-TestH5Obasic" else echo "**FAILED** JUnit-TestH5Obasic" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Obasic.txt JUnit-TestH5Obasic.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocreate" @@ -862,7 +918,9 @@ if diff JUnit-TestH5Ocreate.out JUnit-TestH5Ocreate.txt > /dev/null; then echo " PASSED JUnit-TestH5Ocreate" else echo "**FAILED** JUnit-TestH5Ocreate" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Ocreate.txt JUnit-TestH5Ocreate.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocopy" @@ -881,7 +939,9 @@ if diff JUnit-TestH5Ocopy.out JUnit-TestH5Ocopy.txt > /dev/null; then echo " PASSED JUnit-TestH5Ocopy" else echo "**FAILED** JUnit-TestH5Ocopy" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Ocopy.txt JUnit-TestH5Ocopy.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5PL" @@ -900,7 +960,9 @@ if diff JUnit-TestH5PL.out JUnit-TestH5PL.txt > /dev/null; then echo " PASSED JUnit-TestH5PL" else echo "**FAILED** JUnit-TestH5PL" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5PL.txt JUnit-TestH5PL.out |sed 's/^/ /' fi echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Z" @@ -919,7 +981,9 @@ if diff JUnit-TestH5Z.out JUnit-TestH5Z.txt > /dev/null; then echo " PASSED JUnit-TestH5Z" else echo "**FAILED** JUnit-TestH5Z" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Z.txt JUnit-TestH5Z.out |sed 's/^/ /' fi if test "X-$BUILD_MODE" = "X-production" ; then @@ -940,7 +1004,9 @@ if test "X-$BUILD_MODE" = "X-production" ; then echo " PASSED JUnit-TestH5E" else echo "**FAILED** JUnit-TestH5E" + echo " Expected result differs from actual result" nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5E.txt JUnit-TestH5E.out 
|sed 's/^/    /'
     fi
 fi

@@ -961,7 +1027,9 @@ if test "X-$BUILD_MODE" = "X-production" ; then
         echo "  PASSED      JUnit-TestH5Edefault"
     else
         echo "**FAILED**  JUnit-TestH5Edefault"
+        echo "    Expected result differs from actual result"
         nerrors="`expr $nerrors + 1`"
+        test yes = "$verbose" && $DIFF JUnit-TestH5Edefault.txt JUnit-TestH5Edefault.out |sed 's/^/    /'
     fi
 fi
 fi
@@ -982,7 +1050,9 @@ if test $USE_FILTER_SZIP = "yes"; then
         echo "  PASSED      JUnit-TestH5Giterate"
     else
         echo "**FAILED**  JUnit-TestH5Giterate"
+        echo "    Expected result differs from actual result"
         nerrors="`expr $nerrors + 1`"
+        test yes = "$verbose" && $DIFF JUnit-TestH5Giterate.txt JUnit-TestH5Giterate.out |sed 's/^/    /'
     fi
 fi
-- cgit v0.12


From 8f652a87ca4b77779c4e338d255b4a5217b76227 Mon Sep 17 00:00:00 2001
From: Allen Byrne
Date: Mon, 2 Jul 2018 09:13:31 -0500
Subject: HDFFV-10519 add JNI note

---
 release_docs/RELEASE.txt | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 48fbef0..431143a 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -67,7 +67,13 @@ New Features
     Java Library:
     ----------------
-    -
+    - JNI Read and Write
+
+      Refactored variable-length functions, H5DreadVL and H5AreadVL,
+      to correct dataset and attribute reads. New write functions,
+      H5DwriteVL and H5AwriteVL, are under construction.
+
+      (ADB - 2018/06/02, HDFFV-10519)
 
     Tools:
     ------
-- cgit v0.12


From ef10aa6211779b4f99606243651adcd716904f4e Mon Sep 17 00:00:00 2001
From: Jordan Henderson
Date: Tue, 3 Jul 2018 21:09:19 -0500
Subject: Fix for HDFFV-10509

---
 src/H5Dchunk.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 7216c49..d1528eb 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -2933,8 +2933,21 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled,
             H5F_set_coll_md_read(idx_info.f, temp_cmr);
 #endif /* H5_HAVE_PARALLEL */
 
-            /* Cache the information retrieved */
-            H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
+            /*
+             * Cache the information retrieved.
+             *
+             * Note that if we are writing to the dataset in parallel and filters
+             * are involved, we skip caching this information as it is highly likely
+             * that the chunk information will be invalidated as a result of the
+             * filter operation (e.g. the chunk gets re-allocated to a different
+             * address in the file and/or gets re-allocated with a different size).
+             * If we were to cache this information, subsequent reads/writes would
+             * retrieve the invalid information and cause a variety of issues.
+             */
+#ifdef H5_HAVE_PARALLEL
+            if ( !((H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR) && dset->shared->dcpl_cache.pline.nused) )
+#endif
+                H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
         } /* end if */
     } /* end else */
-- cgit v0.12


From 863148d1fe0b6949708698ad9277dbdba8129e0c Mon Sep 17 00:00:00 2001
From: Hyo-Kyung Lee
Date: Tue, 10 Jul 2018 18:13:44 -0500
Subject: fixed typo.

---
 test/vds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/vds.c b/test/vds.c
index 3f00224..4f03bbd 100644
--- a/test/vds.c
+++ b/test/vds.c
@@ -115,7 +115,7 @@ vds_select_equal(hid_t space1, hid_t space2)
     if(npoints1 != npoints2)
         return FALSE;
 
-    /* Allocate point lists.  Do not return directly afer
+    /* Allocate point lists.  Do not return directly after
      * allocating, to make sure buffers are freed. */
     if(NULL == (buf1 = (hsize_t *)HDmalloc((size_t)rank1 * (size_t)npoints1 * sizeof(hsize_t))))
         TEST_ERROR
-- cgit v0.12


From cf38292064a0c3ffc6971de31573bbd1dab25b80 Mon Sep 17 00:00:00 2001
From: Vailin Choi
Date: Tue, 10 Jul 2018 22:00:14 -0500
Subject: Fix for HDFFV-10333: 1) Check for valid object header version for a
 refcount message 2) Check for invalid fill value size 3) Check for invalid
 dimension size in a layout message 4) Add --enable-error-stack option to
 h5stat 5) Add error checks to h5stat.c 6) Add tests to h5stat and h5dump

---
 src/H5Ocache.c                                     |  28 +-
 src/H5Ofill.c                                      |  22 +-
 src/H5Olayout.c                                    |   4 +-
 src/H5Sselect.c                                    |   2 +-
 tools/src/h5stat/h5stat.c                          | 290 +++++++++++----------
 tools/test/h5dump/h5dumpgentest.c                  |  86 ++++++
 tools/test/h5dump/testh5dump.sh.in                 |   5 +
 tools/test/h5stat/h5stat_gentest.c                 | 142 ++++++++++
 .../test/h5stat/testfiles/h5stat_err_old_fill.ddl  |   2 +
 tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 | Bin 0 -> 2068 bytes
 .../h5stat/testfiles/h5stat_err_old_layout.ddl     |   2 +
 .../test/h5stat/testfiles/h5stat_err_old_layout.h5 | Bin 0 -> 2088 bytes
 .../test/h5stat/testfiles/h5stat_err_refcount.ddl  |   2 +
 tools/test/h5stat/testfiles/h5stat_err_refcount.h5 | Bin 0 -> 4640 bytes
 tools/test/h5stat/testfiles/h5stat_help1.ddl       |   1 +
 tools/test/h5stat/testfiles/h5stat_help2.ddl       |   1 +
 tools/test/h5stat/testfiles/h5stat_nofile.ddl      |   1 +
 tools/test/h5stat/testh5stat.sh.in                 |  15 +-
 tools/testfiles/err_attr_dspace.ddl                |   5 +
 tools/testfiles/err_attr_dspace.h5                 | Bin 0 -> 3047 bytes
 20 files changed, 461 insertions(+), 147 deletions(-)
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_old_fill.h5
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_old_layout.h5
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_refcount.ddl
 create mode 100644 tools/test/h5stat/testfiles/h5stat_err_refcount.h5
 create mode 100644 tools/testfiles/err_attr_dspace.ddl
 create mode 100644 tools/testfiles/err_attr_dspace.h5

diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index 3607839..d65942b 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -1430,9 +1430,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
         /* Check for combining two adjacent 'null' messages */
         if((udata->file_intent & H5F_ACC_RDWR) &&
-            H5O_NULL_ID == id && oh->nmesgs > 0 &&
-            H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id &&
-            oh->mesg[oh->nmesgs - 1].chunkno == chunkno) {
+                H5O_NULL_ID == id && oh->nmesgs > 0 &&
+                H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id &&
+                oh->mesg[oh->nmesgs - 1].chunkno == chunkno) {
+
             size_t mesgno;          /* Current message to operate on */
 
             /* Combine adjacent null messages */
@@ -1467,13 +1468,13 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
             /* Point unknown messages at 'unknown' message class */
             /* (Usually from future versions of the library) */
-            if(id >= H5O_UNKNOWN_ID ||
+            if(id >= H5O_UNKNOWN_ID ||
 #ifdef H5O_ENABLE_BOGUS
-                id == H5O_BOGUS_VALID_ID ||
+                    id == H5O_BOGUS_VALID_ID ||
 #endif
-                NULL == H5O_msg_class_g[id]) {
+                    NULL == H5O_msg_class_g[id]) {
 
-                H5O_unknown_t *unknown;     /* Pointer to "unknown" message info */
+                H5O_unknown_t *unknown;    /* Pointer to "unknown" message info */
 
                 /* Allocate "unknown" message info */
                 if(NULL == (unknown = H5FL_MALLOC(H5O_unknown_t)))
@@ -1490,9 +1491,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
                 /* Check for "fail if unknown" message flags */
                 if(((udata->file_intent & H5F_ACC_RDWR) &&
-                        (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE))
-                        || (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS))
-                    HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found")
+                          (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE))
+                          || (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS))
+                    HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found")
                 /* Check for "mark if unknown" message flag, etc. */
                 else if((flags & H5O_MSG_FLAG_MARK_IF_UNKNOWN) &&
                         !(flags & H5O_MSG_FLAG_WAS_UNKNOWN) &&
@@ -1543,7 +1544,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
                 H5O_refcount_t *refcount;
 
                 /* Decode ref. count message */
-                HDassert(oh->version > H5O_VERSION_1);
+                if(oh->version <= H5O_VERSION_1)
+                    HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "object header version does not support reference count message")
                 refcount = (H5O_refcount_t *)(H5O_MSG_REFCOUNT->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, mesg->raw);
 
                 /* Save 'native' form of ref. count message */
@@ -1614,6 +1616,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
     } /* end if */
 
 done:
+    if(ret_value < 0 && udata->cont_msg_info->msgs) {
+        udata->cont_msg_info->msgs = (H5O_cont_t *)H5FL_SEQ_FREE(H5O_cont_t, udata->cont_msg_info->msgs);
+        udata->cont_msg_info->alloc_nmsgs = 0;
+    }
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5O__chunk_deserialize() */
 
diff --git a/src/H5Ofill.c b/src/H5Ofill.c
index 932241f..3ce2e4f 100644
--- a/src/H5Ofill.c
+++ b/src/H5Ofill.c
@@ -19,6 +19,7 @@
  */
 
 #include "H5Omodule.h"          /* This source code file is part of the H5O module */
+#define H5T_FRIEND              /*prevent warning from including H5Tpkg   */
 
 #include "H5private.h"          /* Generic Functions                    */
@@ -31,6 +32,7 @@
 #include "H5Pprivate.h"         /* Property lists                       */
 #include "H5Sprivate.h"         /* Dataspaces                           */
+#include "H5Tpkg.h"             /* Datatypes                            */
 
 static void  *H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh,
     unsigned mesg_flags, unsigned *ioflags, size_t p_size, const uint8_t *p);
@@ -307,11 +309,13 @@ done:
 *-------------------------------------------------------------------------
 */
 static void *
-H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh,
+H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh,
     unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags,
     size_t H5_ATTR_UNUSED p_size, const uint8_t *p)
 {
     H5O_fill_t *fill = NULL;        /* Decoded fill value message */
+    htri_t exists = FALSE;
+    H5T_t *dt = NULL;
     void *ret_value = NULL;         /* Return value */
 
     FUNC_ENTER_NOAPI_NOINIT
@@ -332,6 +336,19 @@ H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh,
 
     /* Only decode the fill value itself if there is one */
     if(fill->size > 0) {
+        H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t);
+
+        /* Get the datatype message */
+        if((exists = H5O_msg_exists_oh(open_oh, H5O_DTYPE_ID)) < 0)
+            HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read object header")
+        if(exists) {
+            if(NULL == (dt = (H5T_t *)H5O_msg_read_oh(f, open_oh, H5O_DTYPE_ID, NULL)))
+                HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "can't read DTYPE message")
+            /* Verify size */
+            if(fill->size != dt->shared->size)
+                HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size")
+        }
+
         if(NULL == (fill->buf = H5MM_malloc((size_t)fill->size)))
             HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value")
         HDmemcpy(fill->buf, p, (size_t)fill->size);
@@ -344,6
+361,9 @@ H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, ret_value = (void*)fill; done: + if(dt) + H5O_msg_free(H5O_DTYPE_ID, dt); + if(!ret_value && fill) { if(fill->buf) H5MM_xfree(fill->buf); diff --git a/src/H5Olayout.c b/src/H5Olayout.c index d8f05f0..5f16837 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -125,8 +125,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, /* Dimensionality */ ndims = *p++; - if(ndims > H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large") + if(!ndims || ndims > H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is out of range") /* Layout class */ mesg->type = (H5D_layout_t)*p++; diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 4462295..8cd73d9 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -160,7 +160,7 @@ H5S_select_release(H5S_t *ds) HDassert(ds); /* Call the selection type's release function */ - if((ret_value = (*ds->select.type->release)(ds)) < 0) + if((ds->select.type) && ((ret_value = (*ds->select.type->release)(ds)) < 0)) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection") done: diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c index da713ac..ff67cf1 100644 --- a/tools/src/h5stat/h5stat.c +++ b/tools/src/h5stat/h5stat.c @@ -146,7 +146,7 @@ struct handler_t { char **obj; }; -static const char *s_opts ="Aa:Ddm:FfhGgl:sSTO:V"; +static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:V"; /* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */ static struct long_options l_opts[] = { {"help", no_arg, 'h'}, @@ -224,6 +224,7 @@ static struct long_options l_opts[] = { { "attr", no_arg, 'A' }, { "att", no_arg, 'A' }, { "at", no_arg, 'A' }, + { "enable-error-stack", no_arg, 'E' }, { "numattrs", require_arg, 'a' }, { "numattr", require_arg, 'a' }, { "numatt", require_arg, 'a' }, @@ -293,6 +294,7 @@ static void usage(const char *prog) HDfprintf(stdout, " than 0. 
The default threshold is 10.\n"); HDfprintf(stdout, " -s, --freespace Print free space information\n"); HDfprintf(stdout, " -S, --summary Print summary of file space information\n"); + HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n"); } @@ -378,9 +380,8 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi) * * Purpose: Gather statistics about the group * - * Return: Success: 0 - * - * Failure: -1 + * Return: Success: 0 + * Failure: -1 * * Programmer: Quincey Koziol * Tuesday, August 16, 2005 @@ -402,9 +403,9 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi) static herr_t group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) { - H5G_info_t ginfo; /* Group information */ - unsigned bin; /* "bin" the number of objects falls in */ - herr_t ret; + H5G_info_t ginfo; /* Group information */ + unsigned bin; /* "bin" the number of objects falls in */ + herr_t ret_value = SUCCEED; /* Return value */ /* Gather statistics about this type of object */ iter->uniq_groups++; @@ -414,8 +415,8 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) iter->group_ohdr_info.free_size += oi->hdr.space.free; /* Get group information */ - ret = H5Gget_info_by_name(iter->fid, name, &ginfo, H5P_DEFAULT); - HDassert(ret >= 0); + if((ret_value = H5Gget_info_by_name(iter->fid, name, &ginfo, H5P_DEFAULT)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Gget_info_by_name() failed"); /* Update link stats */ /* Collect statistics for small groups */ @@ -429,10 +430,10 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) bin = ceil_log10((unsigned long)ginfo.nlinks); if((bin + 1) > iter->group_nbins) { /* Allocate more storage for info about dataset's datatype */ - iter->group_bins = (unsigned long *)HDrealloc(iter->group_bins, (bin + 1) * sizeof(unsigned long)); - HDassert(iter->group_bins); + if((iter->group_bins = (unsigned long *)HDrealloc(iter->group_bins, (bin + 1) * sizeof(unsigned long))) == NULL) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed"); - /* Initialize counts for intermediate bins */ + /* Initialize counts for intermediate bins */ while(iter->group_nbins < bin) iter->group_bins[iter->group_nbins++] = 0; iter->group_nbins++; @@ -448,10 +449,11 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) iter->groups_heap_storage_size += oi->meta_size.obj.heap_size; /* Update attribute metadata info */ - ret = attribute_stats(iter, oi); - HDassert(ret >= 0); + if((ret_value = attribute_stats(iter, oi)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats failed"); - return 0; +done: + return ret_value; } /* end group_stats() */ @@ -461,7 +463,6 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) * Purpose: Gather statistics about the dataset * * Return: Success: 0 - * * Failure: -1 * * Programmer: Quincey Koziol @@ -472,22 +473,22 @@ group_stats(iter_t *iter, const char *name, const H5O_info_t *oi) static herr_t dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) { - unsigned bin; /* "bin" the number of objects falls in */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hid_t dcpl; /* Dataset creation property list ID */ - hsize_t dims[H5S_MAX_RANK];/* Dimensions of dataset */ - H5D_layout_t lout; /* Layout of dataset */ - unsigned type_found; /* Whether the dataset's datatype was */ - /* already found */ - int ndims; /* Number of dimensions of dataset */ - hsize_t storage; /* Size of dataset storage 
*/ - unsigned u; /* Local index variable */ - int num_ext; /* Number of external files for a dataset */ - int nfltr; /* Number of filters for a dataset */ - H5Z_filter_t fltr; /* Filter identifier */ - herr_t ret; + unsigned bin; /* "bin" the number of objects falls in */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hid_t dcpl; /* Dataset creation property list ID */ + hsize_t dims[H5S_MAX_RANK]; /* Dimensions of dataset */ + H5D_layout_t lout; /* Layout of dataset */ + unsigned type_found; /* Whether the dataset's datatype was */ + /* already found */ + int ndims; /* Number of dimensions of dataset */ + hsize_t storage; /* Size of dataset storage */ + unsigned u; /* Local index variable */ + int num_ext; /* Number of external files for a dataset */ + int nfltr; /* Number of filters for a dataset */ + H5Z_filter_t fltr; /* Filter identifier */ + herr_t ret_value = SUCCEED; /* Return value */ /* Gather statistics about this type of object */ iter->uniq_dsets++; @@ -496,26 +497,27 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) iter->dset_ohdr_info.total_size += oi->hdr.space.total; iter->dset_ohdr_info.free_size += oi->hdr.space.free; - did = H5Dopen2(iter->fid, name, H5P_DEFAULT); - HDassert(did > 0); + if((did = H5Dopen2(iter->fid, name, H5P_DEFAULT)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dopen() failed"); /* Update dataset metadata info */ iter->datasets_index_storage_size += oi->meta_size.obj.index_size; iter->datasets_heap_storage_size += oi->meta_size.obj.heap_size; /* Update attribute metadata info */ - ret = attribute_stats(iter, oi); - HDassert(ret >= 0); + if((ret_value = attribute_stats(iter, oi)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats() failed"); /* Get storage info */ - storage = H5Dget_storage_size(did); + if((storage = H5Dget_storage_size(did)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dget_storage_size() failed"); /* Gather layout statistics */ - dcpl = H5Dget_create_plist(did); - HDassert(dcpl > 0); + if((dcpl = H5Dget_create_plist(did)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dget_create_plist() failed"); - lout = H5Pget_layout(dcpl); - HDassert(lout >= 0); + if((lout = H5Pget_layout(dcpl)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_layout() failed"); /* Object header's total size for H5D_COMPACT layout includes raw data size */ /* "storage" also includes H5D_COMPACT raw data size */ @@ -526,8 +528,8 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) (iter->dset_layouts[lout])++; /* Get the number of external files for the dataset */ - num_ext = H5Pget_external_count(dcpl); - assert (num_ext >= 0); + if((num_ext = H5Pget_external_count(dcpl)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_external_count() failed"); /* Accumulate raw data size accordingly */ if(num_ext) { @@ -537,11 +539,11 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) iter->dset_storage_size += storage; /* Gather dataspace statistics */ - sid = H5Dget_space(did); - HDassert(sid > 0); + if((sid = H5Dget_space(did)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sget_space() failed"); - ndims = H5Sget_simple_extent_dims(sid, dims, NULL); - HDassert(ndims >= 0); + if((ndims = H5Sget_simple_extent_dims(sid, dims, NULL)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims() failed"); /* Check for larger rank of dataset */ if((unsigned)ndims > iter->max_dset_rank) @@ -552,38 +554,38 @@ dataset_stats(iter_t *iter, 
const char *name, const H5O_info_t *oi) /* Only gather dim size statistics on 1-D datasets */ if(ndims == 1) { - /* Determine maximum dimension size */ - if(dims[0] > iter->max_dset_dims) - iter->max_dset_dims = dims[0]; - /* Collect statistics for small datasets */ - if(dims[0] < (hsize_t)sdsets_threshold) - (iter->small_dset_dims[(size_t)dims[0]])++; - - /* Add dim count to proper bin */ - bin = ceil_log10((unsigned long)dims[0]); - if((bin + 1) > iter->dset_dim_nbins) { - /* Allocate more storage for info about dataset's datatype */ - iter->dset_dim_bins = (unsigned long *)HDrealloc(iter->dset_dim_bins, (bin + 1) * sizeof(unsigned long)); - HDassert(iter->dset_dim_bins); - - /* Initialize counts for intermediate bins */ - while(iter->dset_dim_nbins < bin) - iter->dset_dim_bins[iter->dset_dim_nbins++] = 0; - iter->dset_dim_nbins++; - - /* Initialize count for this bin */ - iter->dset_dim_bins[bin] = 1; + /* Determine maximum dimension size */ + if(dims[0] > iter->max_dset_dims) + iter->max_dset_dims = dims[0]; + /* Collect statistics for small datasets */ + if(dims[0] < (hsize_t)sdsets_threshold) + (iter->small_dset_dims[(size_t)dims[0]])++; + + /* Add dim count to proper bin */ + bin = ceil_log10((unsigned long)dims[0]); + if((bin + 1) > iter->dset_dim_nbins) { + /* Allocate more storage for info about dataset's datatype */ + if((iter->dset_dim_bins = (unsigned long *)HDrealloc(iter->dset_dim_bins, (bin + 1) * sizeof(unsigned long))) == NULL) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed"); + + /* Initialize counts for intermediate bins */ + while(iter->dset_dim_nbins < bin) + iter->dset_dim_bins[iter->dset_dim_nbins++] = 0; + iter->dset_dim_nbins++; + + /* Initialize count for this bin */ + iter->dset_dim_bins[bin] = 1; } /* end if */ else (iter->dset_dim_bins[bin])++; } /* end if */ - ret = H5Sclose(sid); - HDassert(ret >= 0); + if(H5Sclose(sid) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Sclose() failed"); /* Gather datatype statistics */ - tid = H5Dget_type(did); - HDassert(tid > 0); + if((tid = H5Dget_type(did)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dget_type() failed"); type_found = FALSE; for(u = 0; u < iter->dset_ntypes; u++) @@ -591,6 +593,7 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) type_found = TRUE; break; } /* end for */ + if(type_found) (iter->dset_type_info[u].count)++; else { @@ -600,12 +603,12 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) iter->dset_ntypes++; /* Allocate more storage for info about dataset's datatype */ - iter->dset_type_info = (dtype_info_t *)HDrealloc(iter->dset_type_info, iter->dset_ntypes * sizeof(dtype_info_t)); - HDassert(iter->dset_type_info); + if((iter->dset_type_info = (dtype_info_t *)HDrealloc(iter->dset_type_info, iter->dset_ntypes * sizeof(dtype_info_t))) == NULL) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Drealloc() failed"); /* Initialize information about datatype */ - iter->dset_type_info[curr_ntype].tid = H5Tcopy(tid); - HDassert(iter->dset_type_info[curr_ntype].tid > 0); + if((iter->dset_type_info[curr_ntype].tid = H5Tcopy(tid)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tcopy() failed"); iter->dset_type_info[curr_ntype].count = 1; iter->dset_type_info[curr_ntype].named = 0; @@ -617,8 +620,8 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) if(H5Tcommitted(tid) > 0) (iter->dset_type_info[u].named)++; - ret = H5Tclose(tid); - HDassert(ret >= 0); + if(H5Tclose(tid) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tclose() failed"); 
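+    /* Illustrative note (hypothetical value, and assuming the ceil_log10()
+     * helper defined earlier in this file): a 1-D dataset with
+     * dims[0] = 1500 falls in bin ceil_log10(1500) = 4, the bin counting
+     * datasets with 1000 <= dims[0] <= 9999 elements; dset_dim_bins[] is
+     * grown on demand to bin + 1 slots before that slot is incremented. */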
/* Track different filters */ if((nfltr = H5Pget_nfilters(dcpl)) >= 0) { @@ -635,13 +638,14 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) } /* end for */ } /* endif nfltr */ - ret = H5Pclose(dcpl); - HDassert(ret >= 0); + if(H5Pclose(dcpl) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose() failed"); - ret = H5Dclose(did); - HDassert(ret >= 0); + if(H5Dclose(did) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Dclose() failed"); - return 0; +done: + return ret_value; } /* end dataset_stats() */ @@ -660,7 +664,7 @@ dataset_stats(iter_t *iter, const char *name, const H5O_info_t *oi) static herr_t datatype_stats(iter_t *iter, const H5O_info_t *oi) { - herr_t ret; + herr_t ret_value = SUCCEED; /* Gather statistics about this type of object */ iter->uniq_dtypes++; @@ -670,10 +674,10 @@ datatype_stats(iter_t *iter, const H5O_info_t *oi) iter->dtype_ohdr_info.free_size += oi->hdr.space.free; /* Update attribute metadata info */ - ret = attribute_stats(iter, oi); - HDassert(ret >= 0); - - return 0; + if((ret_value = attribute_stats(iter, oi)) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "attribute_stats() failed"); +done: + return ret_value; } /* end datatype_stats() */ @@ -695,6 +699,7 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited, void *_iter) { iter_t *iter = (iter_t *)_iter; + herr_t ret_value = SUCCEED; /* If the object has already been seen then just return */ if(NULL == already_visited) { @@ -704,15 +709,18 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited, switch(oi->type) { case H5O_TYPE_GROUP: - group_stats(iter, path, oi); + if(group_stats(iter, path, oi) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "group_stats failed"); break; case H5O_TYPE_DATASET: - dataset_stats(iter, path, oi); + if(dataset_stats(iter, path, oi) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "dataset_stats failed"); break; case H5O_TYPE_NAMED_DATATYPE: - datatype_stats(iter, oi); + if(datatype_stats(iter, oi) < 0) + HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "datatype_stats failed"); break; case H5O_TYPE_UNKNOWN: @@ -724,7 +732,8 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited, } /* end switch */ } /* end if */ - return 0; +done: + return ret_value; } /* end obj_stats() */ @@ -733,9 +742,8 @@ obj_stats(const char *path, const H5O_info_t *oi, const char *already_visited, * * Purpose: Gather statistics about a link * - * Return: Success: 0 - * - * Failure: -1 + * Return: Success: 0 + * Failure: -1 * * Programmer: Quincey Koziol * Tuesday, November 6, 2007 @@ -892,6 +900,10 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret) goto done; break; + case 'E': + enable_error_stack = 1; + break; + case 'F': display_all = FALSE; display_file_metadata = TRUE; @@ -913,14 +925,14 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret) break; case 'l': - if(opt_arg) { - sgroups_threshold = HDatoi(opt_arg); - if(sgroups_threshold < 1) { - error_msg("Invalid threshold for small groups\n"); - goto error; - } - } else - error_msg("Missing threshold for small groups\n"); + if(opt_arg) { + sgroups_threshold = HDatoi(opt_arg); + if(sgroups_threshold < 1) { + error_msg("Invalid threshold for small groups\n"); + goto error; + } + } else + error_msg("Missing threshold for small groups\n"); break; @@ -935,14 +947,14 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret) break; case 'm': - if(opt_arg) { - sdsets_threshold = HDatoi(opt_arg); - 
if(sdsets_threshold < 1) { - error_msg("Invalid threshold for small datasets\n"); - goto error; - } - } else - error_msg("Missing threshold for small datasets\n"); + if(opt_arg) { + sdsets_threshold = HDatoi(opt_arg); + if(sdsets_threshold < 1) { + error_msg("Invalid threshold for small datasets\n"); + goto error; + } + } else + error_msg("Missing threshold for small datasets\n"); break; @@ -957,13 +969,13 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret) break; case 'a': - if(opt_arg) { - sattrs_threshold = HDatoi(opt_arg); - if(sattrs_threshold < 1) { - error_msg("Invalid threshold for small # of attributes\n"); - goto error; - } - } else + if(opt_arg) { + sattrs_threshold = HDatoi(opt_arg); + if(sattrs_threshold < 1) { + error_msg("Invalid threshold for small # of attributes\n"); + goto error; + } + } else error_msg("Missing threshold for small # of attributes\n"); break; @@ -1249,9 +1261,8 @@ print_group_info(const iter_t *iter) * * Purpose: Prints file space information for groups' metadata * - * Return: Success: 0 - * - * Failure: Never fails + * Return: Success: 0 + * Failure: Never fails * * Programmer: Vailin Choi; October 2009 * @@ -1277,9 +1288,8 @@ print_group_metadata(const iter_t *iter) * * Purpose: Prints information about datasets in the file * - * Return: Success: 0 - * - * Failure: Never fails + * Return: Success: 0 + * Failure: Never fails * * Programmer: Elena Pourmal * Saturday, August 12, 2006 @@ -1340,7 +1350,7 @@ print_dataset_info(const iter_t *iter) printf("Dataset layout information:\n"); for(u = 0; u < H5D_NLAYOUTS; u++) - printf("\tDataset layout counts[%s]: %lu\n", (u == H5D_COMPACT ? "COMPACT" : + printf("\tDataset layout counts[%s]: %lu\n", (u == H5D_COMPACT ? "COMPACT" : (u == H5D_CONTIGUOUS ? "CONTIG" : (u == H5D_CHUNKED ? 
"CHUNKED" : "VIRTUAL"))), iter->dset_layouts[u]); printf("\tNumber of external files : %lu\n", iter->nexternal); @@ -1704,16 +1714,25 @@ main(int argc, const char *argv[]) iter_t iter; const char *fname = NULL; hid_t fid = -1; + H5E_auto2_t func; + H5E_auto2_t tools_func; + void *edata; + void *tools_edata; struct handler_t *hand = NULL; h5tools_setprogname(PROGRAMNAME); h5tools_setstatus(EXIT_SUCCESS); /* Disable error reporting */ + H5Eget_auto2(H5E_DEFAULT, &func, &edata); H5Eset_auto2(H5E_DEFAULT, NULL, NULL); /* Initialize h5tools lib */ h5tools_init(); + + /* Disable tools error reporting */ + H5Eget_auto2(H5tools_ERR_STACK_g, &tools_func, &tools_edata); + H5Eset_auto2(H5tools_ERR_STACK_g, NULL, NULL); HDmemset(&iter, 0, sizeof(iter)); @@ -1722,6 +1741,11 @@ main(int argc, const char *argv[]) fname = argv[opt_ind]; + if(enable_error_stack > 0) { + H5Eset_auto2(H5E_DEFAULT, func, edata); + H5Eset_auto2(H5tools_ERR_STACK_g, tools_func, tools_edata); + } + /* Check for filename given */ if(fname) { hid_t fcpl; @@ -1788,16 +1812,18 @@ main(int argc, const char *argv[]) unsigned u; for(u = 0; u < hand->obj_count; u++) { - if(h5trav_visit(fid, hand->obj[u], TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) - warn_msg("Unable to traverse object \"%s\"\n", hand->obj[u]); - else + if(h5trav_visit(fid, hand->obj[u], TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) { + error_msg("unable to traverse object \"%s\"\n", hand->obj[u]); + h5tools_setstatus(EXIT_FAILURE); + } else print_statistics(hand->obj[u], &iter); } /* end for */ } /* end if */ else { - if(h5trav_visit(fid, "/", TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) - warn_msg("Unable to traverse objects/links in file \"%s\"\n", fname); - else + if(h5trav_visit(fid, "/", TRUE, TRUE, obj_stats, lnk_stats, &iter, H5O_INFO_ALL) < 0) { + error_msg("unable to traverse objects/links in file \"%s\"\n", fname); + h5tools_setstatus(EXIT_FAILURE); + } else print_statistics("/", &iter); } /* end else */ } /* end if */ @@ -1813,6 +1839,8 @@ done: h5tools_setstatus(EXIT_FAILURE); } /* end if */ + H5Eset_auto2(H5E_DEFAULT, func, edata); + leave(h5tools_getstatus()); } /* end main() */ diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c index 9358fbb..f34d479 100644 --- a/tools/test/h5dump/h5dumpgentest.c +++ b/tools/test/h5dump/h5dumpgentest.c @@ -113,6 +113,7 @@ #define FILE83 "tvlenstr_array.h5" #define FILE84 "tudfilter.h5" #define FILE85 "tgrpnullspace.h5" +#define FILE86 "err_attr_dspace.h5" /*------------------------------------------------------------------------- * prototypes @@ -10476,6 +10477,89 @@ static void gent_null_space_group(void) H5Fclose(fid); } +/*------------------------------------------------------------------------- + * Function: gent_err_attr_dspace + * + * Purpose: Generate a file with shared dataspace message. + * Then write an illegal version to the shared dataspace message + * to trigger the error. + * This is to verify HDFFV-10333 that h5dump will exit + * gracefully when encountered error similar to + * H5O_attr_decode in the jira issue. 
+ * + *------------------------------------------------------------------------- + */ +static void +gent_err_attr_dspace() +{ + hid_t fid = -1; /* File identifier */ + hid_t fcpl = -1; /* File creation property list */ + hid_t sid; /* Dataspace identifier */ + hid_t aid; /* Attribute identifier */ + hsize_t dims = 2; /* Dimension size */ + int wdata[2] = {7, 42}; /* The buffer to write */ + int fd = -1; /* The file descriptor */ + char val = 6; /* An invalid version */ + + /* Create an fcpl */ + if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) + goto error; + + /* Set up the dataspace message to be shared */ + if(H5Pset_shared_mesg_nindexes(fcpl, 1) < 0) + goto error; + if(H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_SDSPACE_FLAG, 1) < 0) + goto error; + + /* Create the file with the shared message setting */ + if((fid = H5Fcreate(FILE86, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Create the dataspace */ + if((sid = H5Screate_simple(1, &dims, &dims)) < 0) + goto error; + + /* Create an attribute with shared dataspace */ + if((aid = H5Acreate2(fid, "attribute", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + if(H5Awrite(aid, H5T_NATIVE_INT, wdata) < 0) + goto error; + + /* Closing */ + if(H5Aclose(aid) < 0) + goto error; + if(H5Sclose(sid) < 0) + goto error; + if(H5Pclose(fcpl) < 0) + goto error; + if(H5Fclose(fid) < 0) + goto error; + + /* This section of code will write an illegal version to the "version" field + of the shared dataspace message */ + if((fd = HDopen(FILE86, O_RDWR, 0633)) < 0) + goto error; + + /* Offset of the "version" field to modify is as follows: */ + /* 1916: offset of the object header containing the attribute message */ + /* 32: offset of the attribute message in the object header */ + /* 30: offset in the attribute message containing the version of the shared dataspace message */ + if(HDlseek(fd, 1916+32+30, SEEK_SET) < 0) + goto error; + if(HDwrite(fd, &val, 1) < 0) + goto error; + if(HDclose(fd) < 0) + goto error; + +error: + H5E_BEGIN_TRY { + H5Pclose(fcpl); + H5Aclose(aid); + H5Sclose(sid); + H5Fclose(fid); + } H5E_END_TRY; +} /* gent_err_attr_dspace() */ + int main(void) { gent_group(); @@ -10569,6 +10653,8 @@ int main(void) gent_udfilter(); + gent_err_attr_dspace(); + return 0; } diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in index 1935b0d..a2ec035 100644 --- a/tools/test/h5dump/testh5dump.sh.in +++ b/tools/test/h5dump/testh5dump.sh.in @@ -175,6 +175,7 @@ $SRC_H5DUMP_TESTFILES/tvldtypes5.h5 $SRC_H5DUMP_TESTFILES/tvlenstr_array.h5 $SRC_H5DUMP_TESTFILES/tvlstr.h5 $SRC_H5DUMP_TESTFILES/tvms.h5 +$SRC_H5DUMP_TESTFILES/err_attr_dspace.h5 " LIST_OTHER_TEST_FILES=" @@ -360,6 +361,7 @@ $SRC_H5DUMP_TESTFILES/twithddlfile.exp $SRC_H5DUMP_TESTFILES/h5dump-help.txt $SRC_H5DUMP_TESTFILES/out3.h5import $SRC_H5DUMP_TESTFILES/tbinregR.exp +$SRC_H5DUMP_TESTFILES/err_attr_dspace.ddl " LIST_ERROR_TEST_FILES=" @@ -1322,6 +1324,9 @@ TOOLTEST tattrreg.ddl --enable-error-stack tattrreg.h5 TOOLTEST4 tattrregR.ddl --enable-error-stack -R tattrreg.h5 TOOLTEST2 tbinregR.exp --enable-error-stack -d /Dataset1 -s 0 -R -y -o tbinregR.txt tdatareg.h5 +# test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue +TOOLTEST err_attr_dspace.ddl err_attr_dspace.h5 + # Clean up text output files if test -z "$HDF5_NOCLEANUP"; then rm -f tbinregR.txt diff --git a/tools/test/h5stat/h5stat_gentest.c b/tools/test/h5stat/h5stat_gentest.c index 0f696d0..01206dd 100644 --- a/tools/test/h5stat/h5stat_gentest.c +++
b/tools/test/h5stat/h5stat_gentest.c @@ -21,6 +21,7 @@ * of the expected output and update the corresponding *.ddl files. */ #include "hdf5.h" +#include "H5private.h" /* For gen_newgrat_file() */ #define NEWGRAT_FILE "h5stat_newgrat.h5" @@ -43,6 +44,9 @@ #define THRES_NUM 10 #define THRES_NUM_25 25 +/* For gen_err_refcount() */ +#define ERR_REFCOUNT_FILE "h5stat_err_refcount.h5" + /* * Generate HDF5 file with latest format with * NUM_GRPS groups and NUM_ATTRS attributes for the dataset @@ -434,6 +438,141 @@ error: } /* gen_idx_file() */ +/* + * Function: gen_err_refcount + * + * Purpose: Create a file, then write a refcount message ID to a + * message in a version 1 object header. + * This will trigger the error, as a version 1 + * object header does not support a refcount message. + * This is to verify HDFFV-10333: h5stat will exit + * gracefully when it encounters an error similar to + * the H5O_refcount_decode error in the jira issue. + * + */ +static void +gen_err_refcount(const char *fname) +{ + hid_t fid = -1; /* File identifier */ + hid_t fapl = -1; /* File access property list */ + hid_t sid = -1; /* Dataspace identifier */ + hid_t did = -1; /* Dataset identifier */ + hid_t gid = -1; /* Group identifier */ + hid_t aid1 = -1, aid2 = -1; /* Attribute identifiers */ + hid_t tid = -1; /* Datatype identifier */ + int i, n; /* Local index variables */ + int buf[10]; /* Data buffer */ + hsize_t dims[1]; /* Dimension size */ + int fd = -1; /* File descriptor */ + unsigned short val = 22; /* The refcount message ID */ + + /* Initialize data buffer */ + n = 0; + for(i = 0; i < 10; i++) + buf[i] = n++; + + /* Create the file */ + if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + + /* Create a group */ + if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + + /* Create a committed datatype in the group */ + if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0) + goto error; + if(H5Tcommit2(gid, "dtype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) + goto error; + + /* Create the dataspace */ + dims[0] = 10; + if((sid = H5Screate_simple(1, dims, NULL)) < 0) + goto error; + + /* Create a dataset with the committed datatype in the file */ + if((did = H5Dcreate2(fid, "dset", tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + /* Write to the dataset */ + if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) + goto error; + + /* Attach an attribute with the committed datatype to the group */ + if((aid1 = H5Acreate2(gid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + /* Attach an attribute with the committed datatype to the dataset */ + if((aid2 = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + + /* Closing */ + if(H5Aclose(aid1) < 0) + goto error; + if(H5Aclose(aid2) < 0) + goto error; + if(H5Sclose(sid) < 0) + goto error; + if(H5Dclose(did) < 0) + goto error; + if(H5Gclose(gid) < 0) + goto error; + if(H5Tclose(tid) < 0) + goto error; + if(H5Fclose(fid) < 0) + goto error; + + /* This section of code will write a refcount message ID to a message in the + version 1 object header which does not support a refcount message */ + /* Offset of the message ID to modify is as follows: */ + /* 4520: the offset of the object header containing the attribute message + with the committed datatype */ + /* 24: the offset in the object header containing the version of the + attribute message */ + if((fd = HDopen(fname, O_RDWR,
0633)) < 0) + goto error; + if(HDlseek(fd, 4520+24, SEEK_SET) < 0) + goto error; + if(HDwrite(fd, &val, 2) < 0) + goto error; + if(HDclose(fd) < 0) + goto error; + +error: + H5E_BEGIN_TRY { + H5Gclose(gid); + H5Dclose(did); + H5Tclose(tid); + H5Sclose(sid); + H5Aclose(aid1); + H5Aclose(aid2); + H5Fclose(fid); + } H5E_END_TRY; +} /* gen_err_refcount() */ + +/* + * The following two test files are generated with older versions + * of the library for HDFFV-10333. They are used for testing in + * testh5stat.sh.in. + * + * (1) h5stat_err_old_layout.h5 + * This file is generated with the 1.6 library so that a file + * with a version 2 layout message is created. + * Then a "0" is written to the "dimension" field in the layout + * message to trigger the error. + * This is to verify HDFFV-10333 that h5stat will exit gracefully + * when encountered error similar to H5O__layout_decode in the + * jira issue. + * + * (2) h5stat_err_old_fill.h5 + * This file is generated with the 1.4 library so that a file + * with an old fill value message is created. + * Then an illegal size is written to the "size" fild in the + * fill value message to trigger the error. + * This is to verify HDFFV-10333 that h5stat will exit gracefully + * when encountered error similar to H5O_fill_old_decode in the + * jira issue. + */ + int main(void) { gen_newgrat_file(NEWGRAT_FILE); @@ -442,6 +581,9 @@ int main(void) /* Generate an HDF file to test for datasets with Fixed Array indexing */ gen_idx_file(IDX_FILE); + /* Generate a file with a refcount message ID */ + gen_err_refcount(ERR_REFCOUNT_FILE); + return 0; } diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl b/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl new file mode 100644 index 0000000..e751b7f --- /dev/null +++ b/tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl @@ -0,0 +1,2 @@ +Filename: h5stat_err_old_fill.h5 +h5stat error: unable to traverse objects/links in file "h5stat_err_old_fill.h5" diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 b/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 new file mode 100644 index 0000000..aa164f0 Binary files /dev/null and b/tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 differ diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl b/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl new file mode 100644 index 0000000..a3e27e2 --- /dev/null +++ b/tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl @@ -0,0 +1,2 @@ +Filename: h5stat_err_old_layout.h5 +h5stat error: unable to traverse objects/links in file "h5stat_err_old_layout.h5" diff --git a/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5 b/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5 new file mode 100644 index 0000000..5c0b5dc Binary files /dev/null and b/tools/test/h5stat/testfiles/h5stat_err_old_layout.h5 differ diff --git a/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl b/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl new file mode 100644 index 0000000..1f1b491 --- /dev/null +++ b/tools/test/h5stat/testfiles/h5stat_err_refcount.ddl @@ -0,0 +1,2 @@ +Filename: h5stat_err_refcount.h5 +h5stat error: unable to traverse objects/links in file "h5stat_err_refcount.h5" diff --git a/tools/test/h5stat/testfiles/h5stat_err_refcount.h5 b/tools/test/h5stat/testfiles/h5stat_err_refcount.h5 new file mode 100644 index 0000000..5e0d5f9 Binary files /dev/null and b/tools/test/h5stat/testfiles/h5stat_err_refcount.h5 differ diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl 
b/tools/test/h5stat/testfiles/h5stat_help1.ddl index d2a8715..01e39af 100644 --- a/tools/test/h5stat/testfiles/h5stat_help1.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl @@ -22,3 +22,4 @@ Usage: h5stat [OPTIONS] file than 0. The default threshold is 10. -s, --freespace Print free space information -S, --summary Print summary of file space information + --enable-error-stack Prints messages from the HDF5 error stack as they occur diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl index d2a8715..01e39af 100644 --- a/tools/test/h5stat/testfiles/h5stat_help2.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl @@ -22,3 +22,4 @@ Usage: h5stat [OPTIONS] file than 0. The default threshold is 10. -s, --freespace Print free space information -S, --summary Print summary of file space information + --enable-error-stack Prints messages from the HDF5 error stack as they occur diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl index d8a8b2c..7171320 100644 --- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl +++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl @@ -22,4 +22,5 @@ Usage: h5stat [OPTIONS] file than 0. The default threshold is 10. -s, --freespace Print free space information -S, --summary Print summary of file space information + --enable-error-stack Prints messages from the HDF5 error stack as they occur h5stat error: missing file name diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in index ca7ca4c..d178b0d 100644 --- a/tools/test/h5stat/testh5stat.sh.in +++ b/tools/test/h5stat/testh5stat.sh.in @@ -74,6 +74,9 @@ $SRC_H5STAT_TESTFILES/h5stat_tsohm.h5 $SRC_H5STAT_TESTFILES/h5stat_newgrat.h5 $SRC_H5STAT_TESTFILES/h5stat_idx.h5 $SRC_H5STAT_TESTFILES/h5stat_threshold.h5 +$SRC_H5STAT_TESTFILES/h5stat_err_refcount.h5 +$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.h5 +$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.h5 " LIST_OTHER_TEST_FILES=" @@ -109,6 +112,9 @@ $SRC_H5STAT_TESTFILES/h5stat_numattrs1.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs2.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs3.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs4.ddl +$SRC_H5STAT_TESTFILES/h5stat_err_refcount.ddl +$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.ddl +$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.ddl " # @@ -243,6 +249,7 @@ TOOLTEST h5stat_help2.ddl --help TOOLTEST h5stat_notexist.ddl notexist.h5 TOOLTEST h5stat_nofile.ddl '' + # Test file with groups, compressed datasets, user-applied fileters, etc. 
# h5stat_filters.h5 is a copy of ../../testfiles/tfilters.h5 as of release 1.8.0-alpha4 TOOLTEST h5stat_filters.ddl h5stat_filters.h5 @@ -304,7 +311,13 @@ TOOLTEST h5stat_numattrs3.ddl -A --numattrs=25 h5stat_threshold.h5 # -A -a 100 TOOLTEST h5stat_numattrs4.ddl -A -a 100 h5stat_newgrat.h5 # - +# +# Tests to verify HDFFV-10333 +TOOLTEST h5stat_err_refcount.ddl h5stat_err_refcount.h5 +TOOLTEST h5stat_err_old_layout.ddl h5stat_err_old_layout.h5 +TOOLTEST h5stat_err_old_fill.ddl h5stat_err_old_fill.h5 +# +# # Clean up temporary files/directories CLEAN_TESTFILES_AND_TESTDIR diff --git a/tools/testfiles/err_attr_dspace.ddl b/tools/testfiles/err_attr_dspace.ddl new file mode 100644 index 0000000..6c45322 --- /dev/null +++ b/tools/testfiles/err_attr_dspace.ddl @@ -0,0 +1,5 @@ +HDF5 "err_attr_dspace.h5" { +GROUP "/" { +} +} +h5dump error: error getting attribute information diff --git a/tools/testfiles/err_attr_dspace.h5 b/tools/testfiles/err_attr_dspace.h5 new file mode 100644 index 0000000..969c328 Binary files /dev/null and b/tools/testfiles/err_attr_dspace.h5 differ -- cgit v0.12 From d1f8ac331222351a63147ab54271ac9d5b271f0b Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Wed, 11 Jul 2018 16:02:51 -0500 Subject: Modifications made based on feedback from pull request. --- MANIFEST | 8 ++++++++ release_docs/RELEASE.txt | 11 +++++++++++ tools/test/h5dump/CMakeTests.cmake | 5 +++++ tools/test/h5dump/testh5dump.sh.in | 6 +++--- tools/test/h5stat/CMakeTests.cmake | 18 +++++++++++++++--- tools/test/h5stat/testh5stat.sh.in | 14 ++++++++------ 6 files changed, 50 insertions(+), 12 deletions(-) diff --git a/MANIFEST b/MANIFEST index 9ab179f..e5f06cf 100644 --- a/MANIFEST +++ b/MANIFEST @@ -1562,6 +1562,12 @@ ./tools/test/h5stat/testfiles/h5stat_err1_links.ddl ./tools/test/h5stat/testfiles/h5stat_err1_numattrs.ddl ./tools/test/h5stat/testfiles/h5stat_err2_numattrs.ddl +./tools/test/h5stat/testfiles/h5stat_err_old_fill.h5 +./tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl +./tools/test/h5stat/testfiles/h5stat_err_old_layout.h5 +./tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl +./tools/test/h5stat/testfiles/h5stat_err_refcount.h5 +./tools/test/h5stat/testfiles/h5stat_err_refcount.ddl ./tools/test/h5stat/testfiles/h5stat_filters.ddl ./tools/test/h5stat/testfiles/h5stat_filters-d.ddl ./tools/test/h5stat/testfiles/h5stat_filters-dT.ddl @@ -1597,6 +1603,8 @@ # h5dump test files ./tools/testfiles/charsets.h5 ./tools/testfiles/charsets.ddl +./tools/testfiles/err_attr_dspace.h5 +./tools/testfiles/err_attr_dspace.ddl ./tools/testfiles/family_file00000.h5 ./tools/testfiles/family_file00001.h5 ./tools/testfiles/family_file00002.h5 diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 48fbef0..1204c0b 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -98,6 +98,17 @@ Bug Fixes since HDF5-1.10.2 release Library ------- + - Error checks in h5stat and when decoding messages + + h5stat exited with a seg fault/core dump when + errors were encountered in the internal library. + + Add error checks and an --enable-error-stack option to h5stat. + Add range checks when decoding messages: old fill value, old + layout and refcount.
+ + (VC - 2018/07/11, HDFFV-10333) + - If an HDF5 file contains a malformed compound datatype with a suitably large offset, the type conversion code can run off the end of the type conversion buffer, causing a segmentation diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake index cdd3e6d..132fc2b 100644 --- a/tools/test/h5dump/CMakeTests.cmake +++ b/tools/test/h5dump/CMakeTests.cmake @@ -21,6 +21,7 @@ # -------------------------------------------------------------------- set (HDF5_REFERENCE_FILES ${HDF5_TOOLS_DIR}/testfiles/charsets.ddl + ${HDF5_TOOLS_DIR}/testfiles/err_attr_dspace.ddl ${HDF5_TOOLS_DIR}/testfiles/file_space.ddl ${HDF5_TOOLS_DIR}/testfiles/filter_fail.ddl ${HDF5_TOOLS_DIR}/testfiles/non_existing.ddl @@ -217,6 +218,7 @@ ) set (HDF5_REFERENCE_TEST_FILES ${HDF5_TOOLS_DIR}/testfiles/charsets.h5 + ${HDF5_TOOLS_DIR}/testfiles/err_attr_dspace.h5 ${HDF5_TOOLS_DIR}/testfiles/file_space.h5 ${HDF5_TOOLS_DIR}/testfiles/filter_fail.h5 ${HDF5_TOOLS_DIR}/testfiles/packedbits.h5 @@ -1533,6 +1535,9 @@ # test for non-existing file ADD_H5_TEST (non_existing 1 --enable-error-stack tgroup.h5 non_existing.h5) + # test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue + ADD_H5_TEST (err_attr_dspace 1 err_attr_dspace.h5) + ############################################################################## ### P L U G I N T E S T S ############################################################################## diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in index a2ec035..42e4b07 100644 --- a/tools/test/h5dump/testh5dump.sh.in +++ b/tools/test/h5dump/testh5dump.sh.in @@ -1324,9 +1324,6 @@ TOOLTEST tattrreg.ddl --enable-error-stack tattrreg.h5 TOOLTEST4 tattrregR.ddl --enable-error-stack -R tattrreg.h5 TOOLTEST2 tbinregR.exp --enable-error-stack -d /Dataset1 -s 0 -R -y -o tbinregR.txt tdatareg.h5 -# test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue -TOOLTEST err_attr_dspace.ddl err_attr_dspace.h5 - # Clean up text output files if test -z "$HDF5_NOCLEANUP"; then rm -f tbinregR.txt @@ -1369,6 +1366,9 @@ TOOLTEST2 tall-6.exp --enable-error-stack -y -o tall-6.txt -d /g1/g1.1/dset1.1.1 # test for non-existing file TOOLTEST3 non_existing.ddl --enable-error-stack tgroup.h5 non_existing.h5 +# test to verify HDFFV-10333: error similar to H5O_attr_decode in the jira issue +TOOLTEST err_attr_dspace.ddl err_attr_dspace.h5 + # Clean up temporary files/directories CLEAN_TESTFILES_AND_TESTDIR diff --git a/tools/test/h5stat/CMakeTests.cmake b/tools/test/h5stat/CMakeTests.cmake index 69bab20..bd55ac1 100644 --- a/tools/test/h5stat/CMakeTests.cmake +++ b/tools/test/h5stat/CMakeTests.cmake @@ -20,6 +20,9 @@ # Copy all the HDF5 files from the test directory into the source directory # -------------------------------------------------------------------- set (HDF5_REFERENCE_FILES + h5stat_err_refcount + h5stat_err_old_layout + h5stat_err_old_fill h5stat_help1 h5stat_help2 h5stat_notexist @@ -54,6 +57,9 @@ h5stat_numattrs4 ) set (HDF5_REFERENCE_TEST_FILES + h5stat_err_refcount.h5 + h5stat_err_old_layout.h5 + h5stat_err_old_fill.h5 h5stat_filters.h5 h5stat_idx.h5 h5stat_tsohm.h5 @@ -160,9 +166,7 @@ ADD_H5_TEST (h5stat_newgrat-UG 0 -G h5stat_newgrat.h5) ADD_H5_TEST (h5stat_newgrat-UA 0 -A h5stat_newgrat.h5) # h5stat_idx.h5 is generated by h5stat_gentest.c - if (HDF5_BUILD_GENERATORS) - ADD_H5_TEST (h5stat_idx 0 h5stat_idx.h5) - endif () + ADD_H5_TEST (h5stat_idx 0 h5stat_idx.h5) # # Tests for -l (--links) 
option on h5stat_threshold.h5: # -l 0 (incorrect threshold value) @@ -204,3 +208,11 @@ # -A -a 100 ADD_H5_TEST (h5stat_numattrs4 0 -A -a 100 h5stat_newgrat.h5) # +# Tests to verify HDFFV-10333: +# h5stat_err_refcount.h5 is generated by h5stat_gentest.c +# h5stat_err_old_layout.h5 and h5stat_err_old_fill.h5: see explanation in h5stat_gentest.c + ADD_H5_TEST (h5stat_err_refcount 1 h5stat_err_refcount.h5) + ADD_H5_TEST (h5stat_err_old_layout 1 h5stat_err_old_layout.h5) + ADD_H5_TEST (h5stat_err_old_fill 1 h5stat_err_old_fill.h5) +# +# diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in index d178b0d..5082daf 100644 --- a/tools/test/h5stat/testh5stat.sh.in +++ b/tools/test/h5stat/testh5stat.sh.in @@ -69,17 +69,20 @@ test -d $TESTDIR || mkdir $TESTDIR # Comment '#' without space can be used. # -------------------------------------------------------------------- LIST_HDF5_TEST_FILES=" +$SRC_H5STAT_TESTFILES/h5stat_err_refcount.h5 +$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.h5 +$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.h5 $SRC_H5STAT_TESTFILES/h5stat_filters.h5 $SRC_H5STAT_TESTFILES/h5stat_tsohm.h5 $SRC_H5STAT_TESTFILES/h5stat_newgrat.h5 $SRC_H5STAT_TESTFILES/h5stat_idx.h5 $SRC_H5STAT_TESTFILES/h5stat_threshold.h5 -$SRC_H5STAT_TESTFILES/h5stat_err_refcount.h5 -$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.h5 -$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.h5 " LIST_OTHER_TEST_FILES=" +$SRC_H5STAT_TESTFILES/h5stat_err_refcount.ddl +$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.ddl +$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.ddl $SRC_H5STAT_TESTFILES/h5stat_help1.ddl $SRC_H5STAT_TESTFILES/h5stat_help2.ddl $SRC_H5STAT_TESTFILES/h5stat_notexist.ddl @@ -112,9 +115,6 @@ $SRC_H5STAT_TESTFILES/h5stat_numattrs1.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs2.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs3.ddl $SRC_H5STAT_TESTFILES/h5stat_numattrs4.ddl -$SRC_H5STAT_TESTFILES/h5stat_err_refcount.ddl -$SRC_H5STAT_TESTFILES/h5stat_err_old_layout.ddl -$SRC_H5STAT_TESTFILES/h5stat_err_old_fill.ddl " # @@ -313,6 +313,8 @@ TOOLTEST h5stat_numattrs4.ddl -A -a 100 h5stat_newgrat.h5 # # # Tests to verify HDFFV-10333 +# h5stat_err_refcount.h5 is generated by h5stat_gentest.c +# h5stat_err_old_layout.h5 and h5stat_err_old_fill.h5: see explanation in h5stat_gentest.c TOOLTEST h5stat_err_refcount.ddl h5stat_err_refcount.h5 TOOLTEST h5stat_err_old_layout.ddl h5stat_err_old_layout.h5 TOOLTEST h5stat_err_old_fill.ddl h5stat_err_old_fill.h5 -- cgit v0.12 From f369590a1005756e2574eeb1fcf4f8d3fb8886fe Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Thu, 12 Jul 2018 13:55:45 -0500 Subject: Fix test_misc33() in test/tmisc.c Open the test file read-only so that it can be accessed for testing. --- test/tmisc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tmisc.c b/test/tmisc.c index 07234e9..1e62302 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -5499,7 +5499,7 @@ test_misc33(void) MESSAGE(5, ("Testing that bad offset into the heap returns error")); /* Open the test file */ - fid = H5Fopen(testfile, H5F_ACC_RDWR, H5P_DEFAULT); + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fopen"); /* Case (1) */ -- cgit v0.12 From 92bf81a0fbad8e263c1e0d3c5791f49b78046afb Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Thu, 12 Jul 2018 18:25:49 -0500 Subject: Changes based on feedback from pull request. 
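In brief, the library-side change here: H5O_fill_old_decode() now verifies the fill value size through the private H5T_GET_SIZE() accessor instead of reaching into the package-private H5T_t structure, which lets H5Ofill.c drop its H5T_FRIEND define and H5Tpkg.h include. A minimal sketch of the pattern, excerpted from the diff below: /* Before: required H5T_FRIEND + H5Tpkg.h to dereference dt->shared->size */ if(fill->size != dt->shared->size) ... /* After: the private accessor keeps the H5T package encapsulated */ if(fill->size != H5T_GET_SIZE(dt)) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size")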
--- src/H5Ofill.c | 4 +--- tools/test/h5dump/h5dumpgentest.c | 4 ++-- tools/test/h5stat/h5stat_gentest.c | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 3ce2e4f..da9829b 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -19,7 +19,6 @@ */ #include "H5Omodule.h" /* This source code file is part of the H5O module */ -#define H5T_FRIEND /*prevent warning from including H5Tpkg */ #include "H5private.h" /* Generic Functions */ @@ -32,7 +31,6 @@ #include "H5Pprivate.h" /* Property lists */ #include "H5Sprivate.h" /* Dataspaces */ -#include "H5Tpkg.h" /* Datatypes */ static void *H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags, unsigned *ioflags, size_t p_size, const uint8_t *p); @@ -345,7 +343,7 @@ H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh, if((dt = H5O_msg_read_oh(f, open_oh, H5O_DTYPE_ID, NULL)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "can't read DTYPE message") /* Verify size */ - if(fill->size != dt->shared->size) + if(fill->size != H5T_GET_SIZE(dt)) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size") } diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c index f34d479..9e9f6f1 100644 --- a/tools/test/h5dump/h5dumpgentest.c +++ b/tools/test/h5dump/h5dumpgentest.c @@ -10494,8 +10494,8 @@ gent_err_attr_dspace() { hid_t fid = -1; /* File identifier */ hid_t fcpl = -1; /* File creation property list */ - hid_t sid; /* Dataspace identifier */ - hid_t aid; /* Attribute identifier */ + hid_t sid = -1; /* Dataspace identifier */ + hid_t aid = -1; /* Attribute identifier */ hsize_t dims = 2; /* Dimension size */ int wdata[2] = {7, 42}; /* The buffer to write */ int fd = -1; /* The file descriptor */ diff --git a/tools/test/h5stat/h5stat_gentest.c b/tools/test/h5stat/h5stat_gentest.c index 01206dd..2daf24b 100644 --- a/tools/test/h5stat/h5stat_gentest.c +++ b/tools/test/h5stat/h5stat_gentest.c @@ -455,7 +455,6 @@ static void gen_err_refcount(const char *fname) { hid_t fid = -1; /* File identifier */ - hid_t fapl = -1; /* File access property list */ hid_t sid = -1; /* Dataspace identifier */ hid_t did = -1; /* Dataset identifier */ hid_t gid = -1; /* Group identifier */ -- cgit v0.12 From 5f34de4061470c0fe765aa786c2dbbcdbee13264 Mon Sep 17 00:00:00 2001 From: Hyo-Kyung Lee Date: Thu, 12 Jul 2018 18:45:30 -0500 Subject: HDFFV-10527:corrected typos in comment blocks. --- src/H5FS.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/H5FS.c b/src/H5FS.c index e1a760f..1d90df0 100644 --- a/src/H5FS.c +++ b/src/H5FS.c @@ -135,7 +135,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl fspace->alignment = alignment; fspace->align_thres = threshold; - /* Check if the free space tracker is supposed to be persistant */ + /* Check if the free space tracker is supposed to be persistent */ if(fs_addr) { /* Allocate space for the free space header */ if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, (hsize_t)fspace->hdr_size))) @@ -883,7 +883,7 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace) HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache") /* Since space has been allocated for the section info and the sinfo - * has been inserted into the cache, relinquish owership (i.e. float) + * has been inserted into the cache, relinquish ownership (i.e. float) * the section info.
*/ fspace->sinfo = NULL; -- cgit v0.12 From cb38d210c3ec4945fd47b5861d7db137e0efac55 Mon Sep 17 00:00:00 2001 From: Hyo-Kyung Lee Date: Thu, 12 Jul 2018 18:56:20 -0500 Subject: HDFFV-10527:corrected two more typos. --- src/H5FS.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/H5FS.c b/src/H5FS.c index 1d90df0..113c8ae 100644 --- a/src/H5FS.c +++ b/src/H5FS.c @@ -423,7 +423,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu, fspace->serial_sect_count = HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", FUNC, fspace->alloc_sect_size, fspace->sect_size); #endif /* H5FS_DEBUG */ /* If there are sections to serialize, update them */ - /* (if the free space manager is persistant) */ + /* (if the free space manager is persistent) */ if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) { #ifdef H5FS_DEBUG HDfprintf(stderr, "%s: Real sections to store in file\n", FUNC); @@ -794,7 +794,7 @@ HDfprintf(stderr, "%s: Marking free space header as dirty\n", FUNC); /* Sanity check */ HDassert(fspace); - /* Check if the free space manager is persistant */ + /* Check if the free space manager is persistent */ if(H5F_addr_defined(fspace->addr)) /* Mark header as dirty in cache */ if(H5AC_mark_entry_dirty(fspace) < 0) -- cgit v0.12 From 30d3bc2c91b42a0d54595267b3549ed318be2df3 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Fri, 13 Jul 2018 08:09:48 -0500 Subject: Fix error message mentioning wrong MPI function used --- src/H5Dmpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 81aa799..b9d0db6 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -526,7 +526,7 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, else { if (MPI_SUCCESS != (mpi_code = MPI_Gatherv(local_array, sendcount, MPI_BYTE, gathered_array, receive_counts_array, displacements_array, MPI_BYTE, root, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code) + HMPI_GOTO_ERROR(FAIL, "MPI_Gatherv failed", mpi_code) } /* end else */ if (sort_func && (allgather || (mpi_rank == root))) -- cgit v0.12 From 0dc6c3c8b5c2e42fa83872c3c753dbf0d7ec4987 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Fri, 13 Jul 2018 09:26:41 -0500 Subject: Add check for actually using the MPI file driver when caching one chunk Better explain rationale behind chunk caching issue fix --- src/H5Dchunk.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index d1528eb..f901cdf 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -2943,9 +2943,23 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, * address in the file and/or gets re-allocated with a different size). * If we were to cache this information, subsequent reads/writes would * retrieve the invalid information and cause a variety of issues. + * + * This is an ugly and potentially frail check, but the + * H5D__chunk_cinfo_cache_reset() function is not currently available + * to functions outside of this file, so outside functions can not + * invalidate this single chunk cache. Even if the function were available, + * this check prevents us from doing the work of going through and caching + * each chunk in the write operation, when we're only going to invalidate + * the cache at the end of a parallel write anyway. 
+ * + * - JTH */ #ifdef H5_HAVE_PARALLEL - if ( !((H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR) && dset->shared->dcpl_cache.pline.nused) ) + if ( !( (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) + && (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR) + && dset->shared->dcpl_cache.pline.nused + ) + ) #endif H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata); } /* end if */ -- cgit v0.12 From a8d6f100cdddbfc42d0c4abfeb4ceb8788b1b087 Mon Sep 17 00:00:00 2001 From: Jordan Henderson Date: Fri, 13 Jul 2018 10:20:16 -0500 Subject: Add note about single chunk caching and serial library --- src/H5Dchunk.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index f901cdf..b7567e4 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -2944,6 +2944,12 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, * If we were to cache this information, subsequent reads/writes would * retrieve the invalid information and cause a variety of issues. * + * It has been verified that in the serial library, when writing to chunks + * with the real chunk cache disabled and with filters involved, the + * functions within this file are correctly called in such a manner that + * this single chunk cache is always updated correctly. Therefore, this + * check is not needed for the serial library. + * * This is an ugly and potentially frail check, but the * H5D__chunk_cinfo_cache_reset() function is not currently available * to functions outside of this file, so outside functions can not @@ -2952,7 +2958,7 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, * each chunk in the write operation, when we're only going to invalidate * the cache at the end of a parallel write anyway. * - * - JTH + * - JTH (7/13/2018) */ #ifdef H5_HAVE_PARALLEL if ( !( (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) -- cgit v0.12 From ec31438afdaf575368938e930eb3af0865a342b3 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Fri, 13 Jul 2018 13:40:22 -0500 Subject: Fixed HDFFV-10404 Description: Applied the typo fixes from user's report. The previous pull request couldn't be merged because it was too old, and it was too complicated for me to resolve conflicts. 
Platform tested: Linux/64 (jelly) - very minor --- autogen.sh | 2 +- c++/examples/testh5c++.sh.in | 2 +- c++/src/H5DataType.cpp | 4 ++-- c++/src/H5DcreatProp.cpp | 2 +- c++/src/H5IdComponent.cpp | 2 +- c++/src/H5Library.h | 2 +- c++/src/H5PropList.cpp | 2 +- c++/src/H5PropList.h | 2 +- c++/src/cpp_doc_config | 6 +++--- c++/src/h5c++.in | 6 +++--- c++/test/tfile.cpp | 4 ++-- c++/test/th5s.cpp | 12 ++++++------ configure.ac | 2 +- src/H5.c | 4 ++-- src/H5AC.c | 6 +++--- src/H5ACmpio.c | 8 ++++---- src/H5ACpkg.h | 2 +- src/H5ACpublic.h | 6 +++--- src/H5Aint.c | 4 ++-- src/H5Apkg.h | 2 +- src/H5B.c | 2 +- src/H5B2internal.c | 2 +- src/H5Bprivate.h | 2 +- src/H5C.c | 24 ++++++++++++------------ src/H5Cimage.c | 4 ++-- src/H5Cmpio.c | 2 +- src/H5Cpkg.h | 18 +++++++++--------- src/H5Cprivate.h | 24 ++++++++++++------------ src/H5Dbtree2.c | 2 +- src/H5Dchunk.c | 8 ++++---- src/H5Dint.c | 4 ++-- src/H5Dmpio.c | 4 ++-- src/H5Dvirtual.c | 4 ++-- src/H5Edeprec.c | 10 +++++----- src/H5FDdirect.c | 6 +++--- src/H5FDfamily.c | 2 +- src/H5FDmpio.c | 4 ++-- src/H5FDmulti.c | 2 +- src/H5FL.c | 6 +++--- src/H5FS.c | 8 ++++---- src/H5FScache.c | 6 +++--- src/H5FSsection.c | 14 +++++++------- src/H5Fpkg.h | 2 +- src/H5Fprivate.h | 2 +- src/H5Fsuper.c | 6 +++--- src/H5Gname.c | 2 +- src/H5Gobj.c | 2 +- src/H5HFcache.c | 34 +++++++++++++++++----------------- src/H5HFdtable.c | 4 ++-- src/H5HFhdr.c | 4 ++-- src/H5HFspace.c | 2 +- src/H5HFtiny.c | 2 +- src/H5HGcache.c | 4 ++-- src/H5MF.c | 22 +++++++++++----------- src/H5MFaggr.c | 2 +- src/H5O.c | 2 +- src/H5Oattr.c | 2 +- src/H5Oattribute.c | 2 +- src/H5Ocache_image.c | 2 +- src/H5Ocopy.c | 2 +- src/H5Omessage.c | 6 +++--- src/H5Oshared.c | 2 +- src/H5P.c | 4 ++-- src/H5Pdcpl.c | 4 ++-- src/H5Pdeprec.c | 4 ++-- src/H5Pdxpl.c | 2 +- src/H5Pfapl.c | 20 ++++++++++---------- src/H5Pfcpl.c | 8 ++++---- src/H5Pint.c | 34 +++++++++++++++++----------------- src/H5Ppkg.h | 10 +++++----- src/H5Ppublic.h | 2 +- src/H5SMpkg.h | 2 +- src/H5Sall.c | 2 +- src/H5Shyper.c | 20 ++++++++++---------- src/H5Smpio.c | 7 +++---- src/H5Snone.c | 2 +- src/H5Spoint.c | 2 +- src/H5Sselect.c | 4 ++-- src/H5T.c | 2 +- src/H5Tcommit.c | 2 +- src/H5Tconv.c | 10 +++++----- src/H5Toffset.c | 16 ++++++++-------- src/H5Tpkg.h | 2 +- src/H5Tprivate.h | 2 +- src/H5Znbit.c | 4 ++-- src/H5Zpublic.h | 2 +- src/H5Zscaleoffset.c | 4 ++-- src/H5Ztrans.c | 8 ++++---- src/H5detect.c | 12 ++++++------ src/H5private.h | 4 ++-- src/H5public.h | 4 ++-- src/H5system.c | 10 +++++----- test/cache_common.c | 2 +- test/cache_common.h | 2 +- test/cache_image.c | 24 ++++++++++++------------ test/cache_tagging.c | 22 +++++++++++----------- test/dtransform.c | 2 +- test/enum.c | 2 +- test/mf.c | 16 ++++++++-------- test/mtime.c | 2 +- test/page_buffer.c | 10 +++++----- 101 files changed, 317 insertions(+), 318 deletions(-) diff --git a/autogen.sh b/autogen.sh index 3d33c06..a8ffd32 100755 --- a/autogen.sh +++ b/autogen.sh @@ -12,7 +12,7 @@ # # A script to reconfigure autotools for HDF5, and to recreate other -# generated files specifc to HDF5. +# generated files specific to HDF5. # # IMPORTANT OS X NOTE # diff --git a/c++/examples/testh5c++.sh.in b/c++/examples/testh5c++.sh.in index 42cb0df..907a980 100644 --- a/c++/examples/testh5c++.sh.in +++ b/c++/examples/testh5c++.sh.in @@ -56,7 +56,7 @@ applib=libapp${H5TOOL}.a # short hands # Caution: if some *.h5 files must be cleaned here, list them by names. # Don't use the wildcard form of *.h5 as it will wipe out even *.h5 generated -# by otehr test programs. 
This will cause a racing condition error when +# by other test programs. This will cause a racing condition error when # parallel make (e.g., gmake -j 4) is used. temp_SRC="$hdf5main $appmain $prog1 $prog2" temp_OBJ=`echo $temp_SRC | sed -e "s/\.${suffix}/.o/g"` diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp index a6b8c24..e460871 100644 --- a/c++/src/H5DataType.cpp +++ b/c++/src/H5DataType.cpp @@ -532,7 +532,7 @@ H5T_conv_t DataType::find(const DataType& dest, H5T_cdata_t **pcdata) const ///\param nelmts - IN: Size of array \a buf ///\param buf - IN/OUT: Array containing pre- and post-conversion /// values -///\param background - IN: Optional backgroud buffer +///\param background - IN: Optional background buffer ///\param plist - IN: Property list - default to PropList::DEFAULT ///\return Pointer to a suitable conversion function ///\exception H5::DataTypeIException @@ -558,7 +558,7 @@ void DataType::convert(const DataType& dest, size_t nelmts, void *buf, void *bac ///\brief Locks a datatype, making it read-only and non-destructible. /// ///\exception H5::DataTypeIException -///\par Descrition +///\par Description /// This is normally done by the library for predefined data /// types so the application doesn't inadvertently change or /// delete a predefined type. diff --git a/c++/src/H5DcreatProp.cpp b/c++/src/H5DcreatProp.cpp index 0c2a8c0..c704f85 100644 --- a/c++/src/H5DcreatProp.cpp +++ b/c++/src/H5DcreatProp.cpp @@ -666,7 +666,7 @@ void DSetCreatPropList::setFletcher32() const ///\param size - IN: Number of bytes reserved in the file for the data ///\exception H5::PropListIException ///\par Description -/// If a dataset is splitted across multiple files then the files +/// If a dataset is split across multiple files then the files /// should be defined in order. The total size of the dataset is /// the sum of the \a size arguments for all the external files. If /// the total size is larger than the size of a dataset then the diff --git a/c++/src/H5IdComponent.cpp b/c++/src/H5IdComponent.cpp index fe2d27e..a041273 100644 --- a/c++/src/H5IdComponent.cpp +++ b/c++/src/H5IdComponent.cpp @@ -322,7 +322,7 @@ IdComponent::~IdComponent() {} // // Implementation of protected functions for HDF5 Reference Interface -// and miscelaneous helpers. +// and miscellaneous helpers. // #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/c++/src/H5Library.h b/c++/src/H5Library.h index 9b4150e..b9be3d0 100644 --- a/c++/src/H5Library.h +++ b/c++/src/H5Library.h @@ -20,7 +20,7 @@ namespace H5 { /*! \class H5Library \brief Class H5Library operates the HDF5 library globably. - It is not neccessary to construct an instance of H5Library to use the + It is not necessary to construct an instance of H5Library to use the methods. 
*/ class H5_DLLCPP H5Library { diff --git a/c++/src/H5PropList.cpp b/c++/src/H5PropList.cpp index ef9e16d..1918d27 100644 --- a/c++/src/H5PropList.cpp +++ b/c++/src/H5PropList.cpp @@ -468,7 +468,7 @@ H5std_string PropList::getProperty(const char* name) const throw PropListIException(inMemFunc("getProperty"), "H5Pget failed"); } - // Return propety value as a string after deleting temp C-string + // Return property value as a string after deleting temp C-string H5std_string prop_strg(prop_strg_C); delete []prop_strg_C; return (prop_strg); diff --git a/c++/src/H5PropList.h b/c++/src/H5PropList.h index e0244c1..d704775 100644 --- a/c++/src/H5PropList.h +++ b/c++/src/H5PropList.h @@ -92,7 +92,7 @@ class H5_DLLCPP PropList : public IdComponent { // Determines whether a property list is a certain class. bool isAClass(const PropList& prop_class) const; - /// Query the existance of a property in a property object. + /// Query the existence of a property in a property object. bool propExist(const char* name) const; bool propExist(const H5std_string& name) const; diff --git a/c++/src/cpp_doc_config b/c++/src/cpp_doc_config index 3943f7b..4bba5d5 100644 --- a/c++/src/cpp_doc_config +++ b/c++/src/cpp_doc_config @@ -1010,7 +1010,7 @@ USE_HTAGS = NO VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the -# clang parser (see: http://clang.llvm.org/) for more acurate parsing at the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the # cost of reduced performance. This can be particularly helpful with template # rich C++ code for which doxygen's built-in parser lacks the necessary type # information. @@ -1124,7 +1124,7 @@ HTML_STYLESHEET = # defined cascading style sheet that is included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. +# standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet file to the output directory. For an example # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -2009,7 +2009,7 @@ PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will -# remove all refrences to function-like macros that are alone on a line, have an +# remove all references to function-like macros that are alone on a line, have an # all uppercase name, and do not end with a semicolon. Such function macros are # typically used for boiler-plate code, and will confuse the parser if not # removed. diff --git a/c++/src/h5c++.in b/c++/src/h5c++.in index 00502d9..f068f51 100644 --- a/c++/src/h5c++.in +++ b/c++/src/h5c++.in @@ -38,7 +38,7 @@ HL="@HL@" ## $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS $H5BLD_CXXFLAGS $CXXFLAGS ## ## $LDFLAGS $LIBS $clibpath $link_objs $link_args $shared_link ## ## ## -## These settings can be overriden by setting HDF5_CXXFLAGS, ## +## These settings can be overridden by setting HDF5_CXXFLAGS, ## ## HDF5_CPPFLAGS, HDF5_LDFLAGS, or HDF5_LIBS in the environment. 
## ## ## ############################################################################ @@ -140,7 +140,7 @@ usage() { echo " [default: no except when built with only" echo " shared libraries]" echo " You can also add or change paths and flags to the compile line using" - echo " the following environment varibles or by assigning them to their counterparts" + echo " the following environment variables or by assigning them to their counterparts" echo " in the 'Things You Can Modify to Override...'" section of $prog_name echo " " echo " Variable Current value to be replaced" @@ -311,7 +311,7 @@ fi if test "x$do_link" = "xyes"; then shared_link="" - # conditionnaly link with the hl library + # conditionally link with the hl library if test "X$HL" = "Xhl"; then libraries=" $libraries -lhdf5_hl_cpp -lhdf5_cpp -lhdf5_hl -lhdf5 " else diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index ba5b486..c82ab42 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -76,7 +76,7 @@ const H5std_string FILE4("tfile4.h5"); * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hsize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ @@ -282,7 +282,7 @@ static void test_file_create() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hsize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp index 9c92b64..d4853da 100644 --- a/c++/test/th5s.cpp +++ b/c++/test/th5s.cpp @@ -98,7 +98,7 @@ int space5_data = 7; * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. * April 12, 2011: Raymond Lu * Starting from the 1.8.7 release, we allow dimension @@ -230,7 +230,7 @@ static void test_h5s_basic() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ @@ -294,7 +294,7 @@ static void test_h5s_scalar_write() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ @@ -356,7 +356,7 @@ static void test_h5s_scalar_read() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. 
If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ @@ -411,7 +411,7 @@ static void test_h5s_null() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ @@ -484,7 +484,7 @@ static void test_h5s_compound_scalar_write() * cases. Since there are no operator<< for 'long long' * or int64 in VS C++ ostream, I casted the hssize_t values * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specificly handled + * arises later, this will have to be specifically handled * with a special routine. *------------------------------------------------------------------------- */ diff --git a/configure.ac b/configure.ac index 1de69c8..dfb0a4d 100644 --- a/configure.ac +++ b/configure.ac @@ -411,7 +411,7 @@ AC_CHECK_SIZEOF([double]) AC_CHECK_SIZEOF([long double]) ## ---------------------------------------------------------------------- -## Check for non-standard extenstion __FLOAT128 +## Check for non-standard extension __FLOAT128 ## HAVE_FLOAT128=0 HAVE_QUADMATH=0 diff --git a/src/H5.c b/src/H5.c index 552330d..1b13fea 100644 --- a/src/H5.c +++ b/src/H5.c @@ -470,7 +470,7 @@ H5dont_atexit(void) * library, which are supposed to free any unused memory they have * allocated. * - * These should probably be registered dynamicly in a linked list of + * These should probably be registered dynamically in a linked list of * functions to call, but there aren't that many right now, so we * hard-wire them... * @@ -785,7 +785,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum) HDfprintf (stderr, "%s", H5libhdf5_settings); break; default: - /* 2 or higer: continue silently */ + /* 2 or higher: continue silently */ break; } /* end switch */ diff --git a/src/H5AC.c b/src/H5AC.c index 5eccf9e..989ee10 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -318,7 +318,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size") if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure") + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxiliary structure") aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC; aux_ptr->mpi_comm = mpi_comm; @@ -432,7 +432,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co done: #ifdef H5_HAVE_PARALLEL - /* if there is a failure, try to tidy up the auxilary structure */ + /* if there is a failure, try to tidy up the auxiliary structure */ if(ret_value < 0) { if(aux_ptr != NULL) { if(aux_ptr->d_slist_ptr != NULL) @@ -1878,7 +1878,7 @@ done: * from the cache, clear it, and free it without writing it to * disk. * - * This verion of the function is a complete re-write to + * This version of the function is a complete re-write to * use the new metadata cache. While there isn't all that * much difference between the old and new Purpose sections, * the original version is given below. 
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 7671abc..b60b933 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -933,7 +933,7 @@ done: * * Purpose: Update the dirty_bytes count for a newly inserted entry. * - * If mpi_rank isnt 0, this simply means adding the size + * If mpi_rank isn't 0, this simply means adding the size * of the entry to the dirty_bytes count. * * If mpi_rank is 0, we must also add the entry to the @@ -1332,7 +1332,7 @@ done: * clean. * * This function is the main routine for handling this - * notification proceedure. It must be called + * notification procedure. It must be called * simultaniously on all processes that have the relevant * file open. To this end, it is called only during a * sync point, with a barrier prior to the call. @@ -1340,7 +1340,7 @@ done: * Note that any metadata entry writes by process 0 will * occur after the barrier and just before this call. * - * Typicaly, calls to this function will be triggered in + * Typically, calls to this function will be triggered in * one of two ways: * * 1) Dirty byte creation exceeds some user specified value. @@ -2128,7 +2128,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu /* clear collective access flag on half of the entries in the cache and mark them as independent in case they need to be - evicted later. All ranks are guranteed to mark the same entries + evicted later. All ranks are guaranteed to mark the same entries since we don't modify the order of the collectively accessed entries except through collective access. */ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0) diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index 409d914..9bf84bf 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -189,7 +189,7 @@ H5FL_EXTERN(H5AC_aux_t); * is permitted to write to file. * * dirty_bytes_threshold: Integer field containing the dirty bytes - * generation threashold. Whenever dirty byte creation + * generation threshold. Whenever dirty byte creation * exceeds this value, the metadata cache on process 0 * broadcasts a list of the entries it has flushed since * the last broadcast (or since the beginning of execution) diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h index 654a877..a48aa69 100644 --- a/src/H5ACpublic.h +++ b/src/H5ACpublic.h @@ -39,7 +39,7 @@ extern "C" { * structure H5AC_cache_config_t * * H5AC_cache_config_t is a public structure intended for use in public APIs. - * At least in its initial incarnation, it is basicaly a copy of struct + * At least in its initial incarnation, it is basically a copy of struct * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the * dirty_bytes_threshold field. * @@ -212,7 +212,7 @@ extern "C" { * * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated * type whose value indicates whether and by which algorithm we should - * make flash increases in the size of the cache to accomodate insertion + * make flash increases in the size of the cache to accommodate insertion * of large entries and large increases in the size of a single entry. * * The addition of the flash increment mode was occasioned by performance @@ -379,7 +379,7 @@ extern "C" { * synchronize updates between caches. (See above for outline and * motivation.) * - * This value MUST be consistant across all processes accessing the + * This value MUST be consistent across all processes accessing the * file. This field is ignored unless HDF5 has been compiled for * parallel. 
* diff --git a/src/H5Aint.c b/src/H5Aint.c index e666e8c..55560c7 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -1152,7 +1152,7 @@ H5A__free(H5A_t *attr) HDassert(attr); - /* Free dynamicly allocated items */ + /* Free dynamically allocated items */ if(attr->shared->name) { H5MM_xfree(attr->shared->name); attr->shared->name = NULL; @@ -1235,7 +1235,7 @@ H5A__close(H5A_t *attr) /* Reference count can be 0. It only happens when H5A__create fails. */ if(attr->shared->nrefs <= 1) { - /* Free dynamicly allocated items */ + /* Free dynamically allocated items */ if(H5A__free(attr) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info") diff --git a/src/H5Apkg.h b/src/H5Apkg.h index 3315cc1..af16b55 100644 --- a/src/H5Apkg.h +++ b/src/H5Apkg.h @@ -86,7 +86,7 @@ typedef struct H5A_shared_t { void *data; /* Attribute data (on a temporary basis) */ size_t data_size; /* Size of data on disk */ H5O_msg_crt_idx_t crt_idx; /* Attribute's creation index in the object header */ - unsigned nrefs; /* Ref count for times this object is refered */ + unsigned nrefs; /* Ref count for times this object is referred to */ } H5A_shared_t; /* Define the main attribute structure */ diff --git a/src/H5B.c b/src/H5B.c index 1f19d7b..53d5529 100644 --- a/src/H5B.c +++ b/src/H5B.c @@ -769,7 +769,7 @@ H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx, * the specified type. * * On return, if LT_KEY_CHANGED is non-zero, then LT_KEY is - * the new native left key. Similarily for RT_KEY_CHANGED + * the new native left key. Similarly for RT_KEY_CHANGED * and RT_KEY. * * If the node splits, then MD_KEY contains the key that diff --git a/src/H5B2internal.c b/src/H5B2internal.c index 95ed769..7f6b80a 100644 --- a/src/H5B2internal.c +++ b/src/H5B2internal.c @@ -1192,7 +1192,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hbool_t *depth_decreased, if(swap_loc) idx = 0; else { - /* Count from the orginal index value again */ + /* Count from the original index value again */ n = orig_n; /* Reset "found" flag - the record may have shifted during the diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index 270b4e6..e203b87 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -122,7 +122,7 @@ typedef struct H5B_class_t { H5B_ins_t (*insert)(H5F_t*, haddr_t, void*, hbool_t*, void*, void*, void*, hbool_t*, haddr_t*); - /* min insert uses min leaf, not new(), similarily for max insert */ + /* min insert uses min leaf, not new(), similarly for max insert */ hbool_t follow_min; hbool_t follow_max; diff --git a/src/H5C.c b/src/H5C.c index 729b267..c4d4eed 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -793,8 +793,8 @@ H5C_prep_for_file_close(H5F_t *f) * close, and since the close warning is issued after all * non FSM related space allocations and just before the * first sync point on close, this call will leave the caches - * in a consistant state across the processes if they were - * consistant before. + * in a consistent state across the processes if they were + * consistent before.
* * 2) Since the FSM settle routines are only invoked once during * file close, invoking them now will prevent their invocation @@ -2255,7 +2255,7 @@ H5C_protect(H5F_t * f, if(entry_ptr != NULL) { if(entry_ptr->ring != ring) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -3095,7 +3095,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * * All this is a bit awkward, but until the metadata cache entries * are contiguous, with only one dirty flag, we have to let the supplied - * functions deal with the reseting the is_dirty flag. + * functions deal with resetting the is_dirty flag. */ if(entry_ptr->clear_on_unprotect) { /* Sanity check */ @@ -5029,7 +5029,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr, if ( (cache_ptr)->resize_ctl).rpt_fcn != NULL ) { /* get the hit rate for the reporting function. Should still - * be good as we havent reset the hit rate statistics. + * be good as we haven't reset the hit rate statistics. */ if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") @@ -5272,7 +5272,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG; - /* The flush proceedure here is a bit strange. + /* The flush procedure here is a bit strange. * * In the outer while loop we make at least one pass through the * cache, and then repeat until either all the pinned entries in @@ -5421,7 +5421,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) * * While this optimization used to be easy, with the possibility * of new entries being added to the slist in the midst of the - * flush, we must keep the slist in cannonical form at all + * flush, we must keep the slist in canonical form at all * times. */ if(((!entry_ptr->flush_me_last) || @@ -6280,7 +6280,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) entry_addr = entry_ptr->addr; /* Internal cache data structures should now be up to date, and - * consistant with the status of the entry. + * consistent with the status of the entry. * * Now discard the entry if appropriate. */ @@ -6301,10 +6301,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); /* If the entry is not a prefetched entry, verify that the flush - * dependency parents addresses array has been transfered. + * dependency parents addresses array has been transferred. * * If the entry is prefetched, the free_isr routine will dispose of - * the flush dependency parents adresses array if necessary. + * the flush dependency parents addresses array if necessary. */ if(!entry_ptr->prefetched) { HDassert(0 == entry_ptr->fd_parent_count); @@ -8587,7 +8587,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) * tests will be necessary.
*/ if(cache_ptr->aux_ptr != NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case") #endif /* If required, resize the buffer and update the entry and the cache @@ -8787,7 +8787,7 @@ H5C_remove_entry(void *_entry) cache->entry_watched_for_removal = NULL; /* Internal cache data structures should now be up to date, and - * consistant with the status of the entry. + * consistent with the status of the entry. * * Now clean up internal cache fields if appropriate. */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 51de5c4..26b6506 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -1494,7 +1494,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * if the underlying file alignment is greater than 1. * * Clean this up eventually by extending the size of the cache - * image block to the next alignement boundary, and then setting + * image block to the next alignment boundary, and then setting * the image_data_len to the actual size of the cache_image. * * On the off chance that there is some other way to get a @@ -1554,7 +1554,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * metadata cache image superblock extension message, set * cache_ptr->image_ctl.generate_image to FALSE. This will * allow the file close to continue normally without the - * unecessary generation of the metadata cache image. + * unnecessary generation of the metadata cache image. */ if(cache_ptr->num_entries_in_image > 0) { if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0) diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index e342d69..ecaed62 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -380,7 +380,7 @@ H5C_apply_candidate_list(H5F_t * f, * Note that we are doing things in this round about manner so as * to preserve the order of the LRU list to the best of our ability. * If we don't do this, my experiments indicate that we will have a - * noticably poorer hit ratio as a result. + * noticeably poorer hit ratio as a result. */ if(H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed") diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index c5dc8f3..98d7a01 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -58,7 +58,7 @@ * We maintain doubly linked lists of instances of H5C_cache_entry_t for a * variety of reasons -- protected list, LRU list, and the clean and dirty * LRU lists at present. The following macros support linking and unlinking - * of instances of H5C_cache_entry_t by both their regular and auxilary next + * of instances of H5C_cache_entry_t by both their regular and auxiliary next * and previous pointers. * * The size and length fields are also maintained. @@ -2006,7 +2006,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * the pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -2117,7 +2117,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. 
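To make the linking scheme described in the H5Cpkg.h comment above concrete: a minimal sketch, with assumed names rather than the real H5C_cache_entry_t layout, of how one node carries two independent pairs of next/prev pointers so it can sit on, e.g., the LRU list and an auxiliary list at the same time.

    /* Sketch only: assumed type and field names, not the actual H5C entry. */
    typedef struct sketch_entry_t {
        struct sketch_entry_t *next;     /* regular links (e.g. the LRU list) */
        struct sketch_entry_t *prev;
        struct sketch_entry_t *aux_next; /* auxiliary links (e.g. a clean/dirty LRU) */
        struct sketch_entry_t *aux_prev;
    } sketch_entry_t;

    /* Prepend an entry onto the auxiliary list. The regular next/prev links
     * are not touched, so membership in the two lists stays independent. */
    static void sketch_aux_prepend(sketch_entry_t **head, sketch_entry_t *entry)
    {
        entry->aux_prev = NULL;
        entry->aux_next = *head;
        if(*head)
            (*head)->aux_prev = entry;
        *head = entry;
    }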
* * JRM - 7/28/04 @@ -2375,7 +2375,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -2513,7 +2513,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -3057,7 +3057,7 @@ if ( ( (cache_ptr)->index_size != \ * squeeze a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -3892,7 +3892,7 @@ typedef struct H5C_tag_info_t { * 2) A pinned entry can be accessed or modified at any time. * This places an additional burden on the associated pre-serialize * and serialize callbacks, which must ensure the the entry is in - * a consistant state before creating an image of it. + * a consistent state before creating an image of it. * 3) A pinned entry can be marked as dirty (and possibly * change size) while it is unprotected. @@ -4604,7 +4604,7 @@ typedef struct H5C_tag_info_t { * improper behavior if the next entry in the list is the target of one on * these operations. * - * The following fields are use to count such occurances. They are used + * The following fields are used to count such occurrences. They are used * both in tests (to verify that the scan has been restarted), and to * obtain estimates of how frequently these restarts occur. * @@ -4624,7 +4624,7 @@ typedef struct H5C_tag_info_t { * than the target entry as the result of call(s) to the * pre_serialize or serialize callbacks. * - * Note that at present, this condition can only be triggerd + * Note that at present, this condition can only be triggered * by a call to H5C_serialize_single_entry(). * * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 522b3cf..38a86ee 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -87,7 +87,7 @@ #define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024)) /* Default max cache size and min clean size are give here to make - * them generally accessable. + * them generally accessible. */ #define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024)) #define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024)) @@ -901,7 +901,7 @@ typedef struct H5C_class_t { H5C_get_fsf_size_t fsf_size; } H5C_class_t; -/* Type defintions of callback functions used by the cache as a whole */ +/* Type definitions of callback functions used by the cache as a whole */ typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f, hbool_t *write_permitted_ptr); typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, @@ -925,7 +925,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, * flush dependency -- with the implied setup and takedown overhead and * added complexity.
Further, the flush ordering between rings need only * be enforced on flush operations, and thus the use of flush dependencies - * instead would apply unecessary constraints on flushes under normal + * instead would apply unnecessary constraints on flushes under normal * operating circumstances. * * As of this writing, all metadata entries pretaining to data sets and @@ -1092,7 +1092,7 @@ typedef int H5C_ring_t; * 2) A pinned entry can be accessed or modified at any time. * This places an extra burden on the pre-serialize and * serialize callbacks, which must ensure that a pinned - * entry is consistant and ready to write to disk before + * entry is consistent and ready to write to disk before * generating an image. * * 3) A pinned entry can be marked as dirty (and possibly @@ -1152,7 +1152,7 @@ typedef int H5C_ring_t; * flush_immediately: Boolean flag used only in Phdf5 -- and then only * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. * - * When a destributed metadata write is triggered at a + * When a distributed metadata write is triggered at a * sync point, this field is used to mark entries that * must be flushed before leaving the sync point. At all * other times, this field should be set to FALSE. @@ -1430,7 +1430,7 @@ typedef int H5C_ring_t; * The flush dependency height of any entry involved in a * flush dependency relationship is defined to be the * longest flush dependency path from that entry to an entry - * with no flush depenency children. + * with no flush dependency children. * * Since the image_fd_height is used to order entries in the * cache image so that fd parents preceed fd children, for @@ -1468,8 +1468,8 @@ typedef int H5C_ring_t; * * Further, if the prefetched entry is a flush dependency parent, * all its flush dependency children (which must also be - * prefetched entries), must be tranfered to the new cache - * entry returned by the deserailization callback. + * prefetched entries), must be transferred to the new cache + * entry returned by the deserialization callback. * * Finally, if the prefetched entry is a flush dependency child, * this flush dependency must be destroyed prior to the @@ -1737,7 +1737,7 @@ typedef struct H5C_cache_entry_t { * The flush dependency height of any entry involved in a * flush dependency relationship is defined to be the * longest flush dependency path from that entry to an entry - * with no flush depenency children. + * with no flush dependency children. * * Since the image_fd_height is used to order entries in the * cache image so that fd parents preceed fd children, for @@ -1946,7 +1946,7 @@ typedef struct H5C_image_entry_t { * * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated * type whose value indicates whether and by what algorithm we should - * make flash increases in the size of the cache to accomodate insertion + * make flash increases in the size of the cache to accommodate insertion * of large entries and large increases in the size of a single entry. * * The addition of the flash increment mode was occasioned by performance @@ -2182,9 +2182,9 @@ typedef struct H5C_auto_size_ctl_t { * current value, any value in excess of 255 will be the functional * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. * - * flags: Unsigned integer containing flags controling which aspects of the + * flags: Unsigned integer containing flags controlling which aspects of the * cache image functinality is actually executed. 
The primary impetus - * behind this field is to allow developement of tests for partial + * behind this field is to allow development of tests for partial * implementations that will require little if any modification to run * with the full implementation. In normal operation, all flags should * be set. diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index f9074be..d60e79e 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -378,7 +378,7 @@ H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx) { H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ - unsigned u; /* Local index varible */ + unsigned u; /* Local index variable */ FUNC_ENTER_STATIC_NOERR diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7216c49..e3f6410 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -695,7 +695,7 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, /* Compute the # of chunks in dataset dimensions */ for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) { - /* Round up to the next integer # of chunks, to accomodate partial chunks */ + /* Round up to the next integer # of chunks, to accommodate partial chunks */ layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; if(H5S_UNLIMITED == max_dims[u]) layout->max_chunks[u] = H5S_UNLIMITED; @@ -3016,7 +3016,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) } /* end if */ else { /* - * If we are reseting and something goes wrong after this + * If we are resetting and something goes wrong after this * point then it's too late to recover because we may have * destroyed the original data by calling H5Z_pipeline(). * The only safe option is to continue with the reset @@ -4449,7 +4449,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]) /* Start off with this dimension marked as not needing to be modified */ new_full_dim[op_dim] = FALSE; - /* Calulate offset of first previously incomplete chunk in this + /* Calculate offset of first previously incomplete chunk in this * dimension */ old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]); @@ -4970,7 +4970,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) HGOTO_DONE(SUCCEED) } /* end if */ - /* Round up to the next integer # of chunks, to accomodate partial chunks */ + /* Round up to the next integer # of chunks, to accommodate partial chunks */ /* Use current dims because the indices have already been updated! 
-NAF */ /* (also compute the number of elements per chunk) */ /* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */ diff --git a/src/H5Dint.c b/src/H5Dint.c index b9d9cce..2f67226 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -604,7 +604,7 @@ H5D__cache_dataspace_info(const H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions") dset->shared->ndims = (unsigned)sndims; - /* Compute the inital 'power2up' values */ + /* Compute the initial 'power2up' values */ for(u = 0; u < dset->shared->ndims; u++) { hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ @@ -782,7 +782,7 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id) H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */ /* Shallow copy the fill value property */ - /* (we only want to make certain that the shared component isnt' modified) */ + /* (we only want to make certain that the shared component isn't modified) */ HDmemcpy(&old_fill_prop, fill_prop, sizeof(old_fill_prop)); /* Reset shared component info */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 107b751..bc37840 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -706,7 +706,7 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf io_option = H5D_MULTI_CHUNK_IO; /* via default path. branch by num threshold */ else { - unsigned one_link_chunk_io_threshold; /* Threshhold to use single collective I/O for all chunks */ + unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */ int mpi_size; /* Number of processes in MPI job */ if(H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0) @@ -2765,7 +2765,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace") - /* Intialize iterator for memory selection */ + /* Initialize iterator for memory selection */ if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init = TRUE; diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index c7c0775..7f1ac86 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -796,7 +796,7 @@ H5D__virtual_copy(H5F_t *f_dst, H5O_layout_t *layout_dst) if(f_dst == f_src) { /* Increase reference count on global heap object */ if((heap_rc = H5HG_link(f_dst, (H5HG_t *)&(layout_dst->u.virt.serial_list_hobjid), 1)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count") + HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count") } /* end if */ else #endif /* NOT_YET */ @@ -847,7 +847,7 @@ H5D__virtual_delete(H5F_t *f, H5O_storage_t *storage) #ifdef NOT_YET /* Unlink the global heap block */ if((heap_rc = H5HG_link(f, (H5HG_t *)&(storage->u.virt.serial_list_hobjid), -1)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count") + HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count") if(heap_rc == 0) #endif /* NOT_YET */ /* Delete the global heap block */ diff --git a/src/H5Edeprec.c b/src/H5Edeprec.c index 9ff9f1f..fc3eb9c 100644 --- a/src/H5Edeprec.c +++ b/src/H5Edeprec.c @@ -227,7 +227,7 @@ done: 
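The chunk-count expression patched above, ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u], is the usual ceiling-division idiom for counting a trailing partial chunk as a full chunk. A self-contained illustration (hypothetical sizes, not tied to any real dataset):

    #include <stdio.h>

    /* Ceiling division: the number of chunks of `dim` elements needed to
     * cover `curr` elements, counting a trailing partial chunk as a full
     * chunk. Equivalent to the (curr + dim - 1) / dim form used above. */
    static unsigned long num_chunks(unsigned long curr, unsigned long dim)
    {
        return (curr + dim - 1) / dim;
    }

    int main(void)
    {
        /* 100 elements in chunks of 30 -> 4 chunks; the last holds only 10 */
        printf("%lu\n", num_chunks(100, 30));
        return 0;
    }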
/*------------------------------------------------------------------------- * Function: H5Eclear1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Clears the error stack for the specified error stack. * * Return: Non-negative on success/Negative on failure @@ -258,7 +258,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eprint1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Prints the error stack in some default way. This is just a * convenience function for H5Ewalk() with a function that * prints error messages. Users are encouraged to write there @@ -296,7 +296,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Ewalk1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Walks the error stack for the current thread and calls some * function for each error along the way. * @@ -335,7 +335,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eget_auto1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Returns the current settings for the automatic error stack * traversal function and its data for specific error stack. * Either (or both) arguments may be null in which case the @@ -386,7 +386,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eset_auto1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Turns on or off automatic printing of errors for certain * error stack. When turned on (non-null FUNC pointer) any * API function which returns an error indication will first diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c index 811ea8e..906ec28 100644 --- a/src/H5FDdirect.c +++ b/src/H5FDdirect.c @@ -951,7 +951,7 @@ H5FD_direct_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UN do { /* Read the aligned data in file first. Not able to handle interrupted * system calls and partial results like sec2 driver does because the - * data may no longer be aligned. It's expecially true when the data in + * data may no longer be aligned. It's especially true when the data in * file is smaller than ALLOC_SIZE. */ HDmemset(copy_buf, 0, alloc_size); @@ -1138,9 +1138,9 @@ H5FD_direct_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_U /* * Read the aligned data first if the aligned region doesn't fall - * entirely in the range to be writen. Not able to handle interrupted + * entirely in the range to be written. Not able to handle interrupted * system calls and partial results like sec2 driver does because the - * data may no longer be aligned. It's expecially true when the data in + * data may no longer be aligned. It's especially true when the data in * file is smaller than ALLOC_SIZE. Only read the entire section if * both ends are misaligned, otherwise only read the block on the * misaligned end. 
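The aligned-region handling described in the H5FDdirect.c comments above can be made concrete with a little arithmetic. The helper below is an illustrative sketch under assumed names (power-of-two alignment, sizes in bytes), not the driver's actual code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: given a request [addr, addr + size) and a power-of-two
     * alignment, compute the enclosing aligned region and report whether
     * each end of the request is misaligned. Per the comment above, only
     * the misaligned end(s) need to be read first; if both ends are
     * misaligned the entire section is read. */
    static void aligned_bounds(uint64_t addr, uint64_t size, uint64_t align,
                               uint64_t *start, uint64_t *end,
                               bool *head_misaligned, bool *tail_misaligned)
    {
        *start = addr & ~(align - 1);                    /* round down */
        *end = (addr + size + align - 1) & ~(align - 1); /* round up   */
        *head_misaligned = (addr != *start);
        *tail_misaligned = (((addr + size) & (align - 1)) != 0);
    }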
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 3c1c7bb..e52a71a 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -548,7 +548,7 @@ H5FD_family_sb_encode(H5FD_t *_file, char *name/*out*/, unsigned char *buf/*out* /*------------------------------------------------------------------------- * Function: H5FD_family_sb_decode * - * Purpose: This function has 2 seperate purpose. One is to decodes the + * Purpose: This function has 2 separate purposes. One is to decode the * superblock information for this driver. The NAME argument is * the eight-character (plus null termination) name stored in i * the file. The FILE argument is updated according to the diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index ee3277d..87f8b6a 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -1462,7 +1462,7 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, /* Only look for MPI views for raw data transfers */ if(type == H5FD_MEM_DRAW) { - H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ /* Get the transfer mode from the API context */ if(H5CX_get_io_xfer_mode(&xfer_mode) < 0) @@ -1717,7 +1717,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, #endif int size_i; hbool_t use_view_this_time = FALSE; - H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NOINIT diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c index 8ae23d2..aa1b118 100644 --- a/src/H5FDmulti.c +++ b/src/H5FDmulti.c @@ -263,7 +263,7 @@ H5FD_multi_term(void) /*------------------------------------------------------------------------- * Function: H5Pset_fapl_split * - * Purpose: Compatability function. Makes the multi driver act like the + * Purpose: Compatibility function. Makes the multi driver act like the * old split driver which stored meta data in one file and raw * data in another file. * diff --git a/src/H5FL.c b/src/H5FL.c index 0e67414..89a580a 100644 --- a/src/H5FL.c +++ b/src/H5FL.c @@ -18,7 +18,7 @@ * Purpose: Manage priority queues of free-lists (of blocks of bytes). * These are used in various places in the library which allocate and * free differently blocks of bytes repeatedly.
Usually the same size - * of block is allocated and freed repeatly in a loop, while writing out + * of block is allocated and freed repeatedly in a loop, while writing out * chunked data for example, but the blocks may also be of different sizes * from different datasets and an attempt is made to optimize access to * the proper free list of blocks by using these priority queues to @@ -499,7 +499,7 @@ H5FL_reg_calloc(H5FL_reg_head_t *head H5FL_TRACK_PARAMS) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Clear to zeros */ - /* (Accomodate tracking information, if present) */ + /* (Accommodate tracking information, if present) */ HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE); done: @@ -2220,7 +2220,7 @@ H5FL_fac_calloc(H5FL_fac_head_t *head H5FL_TRACK_PARAMS) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Clear to zeros */ - /* (Accomodate tracking information, if present) */ + /* (Accommodate tracking information, if present) */ HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE); done: diff --git a/src/H5FS.c b/src/H5FS.c index e1a760f..113c8ae 100644 --- a/src/H5FS.c +++ b/src/H5FS.c @@ -135,7 +135,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl fspace->alignment = alignment; fspace->align_thres = threshold; - /* Check if the free space tracker is supposed to be persistant */ + /* Check if the free space tracker is supposed to be persistent */ if(fs_addr) { /* Allocate space for the free space header */ if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, (hsize_t)fspace->hdr_size))) @@ -423,7 +423,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu, fspace->serial_sect_count = HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", FUNC, fspace->alloc_sect_size, fspace->sect_size); #endif /* H5FS_DEBUG */ /* If there are sections to serialize, update them */ - /* (if the free space manager is persistant) */ + /* (if the free space manager is persistent) */ if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) { #ifdef H5FS_DEBUG HDfprintf(stderr, "%s: Real sections to store in file\n", FUNC); @@ -794,7 +794,7 @@ HDfprintf(stderr, "%s: Marking free space header as dirty\n", FUNC); /* Sanity check */ HDassert(fspace); - /* Check if the free space manager is persistant */ + /* Check if the free space manager is persistent */ if(H5F_addr_defined(fspace->addr)) /* Mark header as dirty in cache */ if(H5AC_mark_entry_dirty(fspace) < 0) @@ -883,7 +883,7 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace) HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache") /* Since space has been allocated for the section info and the sinfo - * has been inserted into the cache, relinquish owership (i.e. float) + * has been inserted into the cache, relinquish ownership (i.e. float) * the section info. */ fspace->sinfo = NULL; diff --git a/src/H5FScache.c b/src/H5FScache.c index 875a383..fa04ba1 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -457,7 +457,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, * H5F_addr_defined(fspace->addr) * * will both be TRUE. If this contition does not hold, then - * either the free space info is not persistant + * either the free space info is not persistent * (!H5F_addr_defined(fspace->addr)???) or the section info * contains no free space data that must be written to file * ( fspace->serial_sect_count == 0 ). 
@@ -487,7 +487,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, * * Case 1) If either fspace->serial_sect_count == 0 or * ! H5F_addr_defined(fspace->addr) do nothing as either - * the free space manager data is not persistant, or the + * the free space manager data is not persistent, or the * section info is empty. * * Otherwise, allocate space for the section info in real @@ -1454,7 +1454,7 @@ H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udat /* Call 'serialize' callback for this section */ if(sect_cls->serialize) { if((*sect_cls->serialize)(sect_cls, sect, *udata->image) < 0) - HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't syncronize section") + HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't synchronize section") /* Update offset in serialization buffer */ (*udata->image) += sect_cls->serial_size; diff --git a/src/H5FSsection.c b/src/H5FSsection.c index 11cd722..a58347f 100644 --- a/src/H5FSsection.c +++ b/src/H5FSsection.c @@ -2432,7 +2432,7 @@ done: * returns to 1) above. * * Similarly, if it allocates space for its own header, it - * can go into an infinte loop as it: + * can go into an infinite loop as it: * * 1) allocates space for the header * @@ -2479,7 +2479,7 @@ done: * enabled when the free space managers are read. To allow * for this, we must ensure that space allocated for the * free space manager header and section info is either larger - * than a page, or resides completely withing a page. + * than a page, or resides completely within a page. * * Do this by allocating space for the free space header and * section info starting at page boundaries, and extending @@ -2532,7 +2532,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, HDassert(fspace->sect_addr == HADDR_UNDEF); HDassert(fspace->alloc_sect_size == 0); - /* persistant free space managers must be enabled */ + /* persistent free space managers must be enabled */ HDassert(f->shared->fs_persist); /* At present, all free space strategies enable the free space managers. @@ -2576,7 +2576,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, f, hdr_alloc_size, &eoa_frag_addr, &eoa_frag_size))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space for hdr") - /* if the file alignement is 1, there should be no + /* if the file alignment is 1, there should be no * eoa fragment. Otherwise, drop any fragment on the floor. */ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1)); @@ -2616,7 +2616,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, f, sinfo_alloc_size, &eoa_frag_addr, &eoa_frag_size))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space") - /* if the file alignement is 1, there should be no + /* if the file alignment is 1, there should be no * eoa fragment. Otherwise, drop the fragment on the floor. */ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1)); @@ -2637,7 +2637,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, * On reflection, no. * * On a regular file close, any eviction will not change the - * the contents of the free space manger(s), as all entries + * contents of the free space manager(s), as all entries * should have correct file space allocated by the time this * function is called.
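The paged-aggregation constraint stated in the H5FSsection.c comments above (the header and section info must each either exceed a page or reside completely within one) reduces to a simple containment test. A hedged sketch with assumed names, not an actual H5FS routine:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch (assumed helper; size is assumed > 0): an allocation satisfies
     * the constraint if it is larger than a page, or if its first and last
     * bytes fall in the same page, i.e. it does not cross a page boundary. */
    static bool fsm_alloc_ok(uint64_t addr, uint64_t size, uint64_t page_size)
    {
        if(size > page_size)
            return true;
        return (addr / page_size) == ((addr + size - 1) / page_size);
    }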
* @@ -2658,7 +2658,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty") /* since space has been allocated for the section info and the sinfo - * has been inserted into the cache, relinquish owership (i.e. float) + * has been inserted into the cache, relinquish ownership (i.e. float) * the section info. */ fspace->sinfo = NULL; diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index f815005..2ab41de 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -321,7 +321,7 @@ struct H5F_file_t { haddr_t fs_addr[H5F_MEM_PAGE_NTYPES]; /* Address of free space manager info for each type */ H5FS_t *fs_man[H5F_MEM_PAGE_NTYPES]; /* Free space manager for each file space type */ hbool_t first_alloc_dealloc; /* TRUE iff free space managers */ - /* are persistant and have not */ + /* are persistent and have not */ /* been used accessed for either */ /* allocation or deallocation */ /* since file open. */ diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index f06d4d0..979ba7d 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -557,7 +557,7 @@ typedef struct H5F_t H5F_t; #define H5F_FILE_SPACE_PAGE_SIZE_DEF 4096 /* For paged aggregation: minimum value for file space page size */ #define H5F_FILE_SPACE_PAGE_SIZE_MIN 512 -/* For paged aggregation: maxiumum value for file space page size: 1 gigabyte */ +/* For paged aggregation: maximum value for file space page size: 1 gigabyte */ #define H5F_FILE_SPACE_PAGE_SIZE_MAX 1024*1024*1024 /* For paged aggregation: drop free-space with size <= this threshold for small meta section */ diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 97a6d6f..9339b3d 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -823,13 +823,13 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read) f->shared->eoa_pre_fsm_fsalloc = fsinfo.eoa_pre_fsm_fsalloc; /* f->shared->eoa_pre_fsm_fsalloc must always be HADDR_UNDEF - * in the absence of persistant free space managers. + * in the absence of persistent free space managers. */ /* If the following two conditions are true: * (1) skipping EOF check (skip_eof_check) * (2) dropping free-space to the floor (null_fsm_addr) * skip the asserts as "eoa_pre_fsm_fsalloc" may be undefined - * for a crashed file with persistant free space managers. + * for a crashed file with persistent free space managers. * #1 and #2 are enabled when the tool h5clear --increment * option is used. */ @@ -839,7 +839,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read) } /* end if */ /* As "eoa_pre_fsm_fsalloc" may be undefined for a crashed file - * with persistant free space managers, therefore, set + * with persistent free space managers, therefore, set * "first_alloc_dealloc" when the condition * "dropping free-space to the floor is true. * This will ensure that no action is done to settle things on file diff --git a/src/H5Gname.c b/src/H5Gname.c index 1ca06b9..c1156ca 100644 --- a/src/H5Gname.c +++ b/src/H5Gname.c @@ -1058,7 +1058,7 @@ done: * Function: H5G_name_replace * * Purpose: Search the list of open IDs and replace names according to a - * particular operation. The operation occured on the + * particular operation. The operation occurred on the * SRC_FILE/SRC_FULL_PATH_R object. The new name (if there is * one) is NEW_NAME_R. 
Additional entry location information * (currently only needed for the 'move' operation) is passed in diff --git a/src/H5Gobj.c b/src/H5Gobj.c index af0e72c..e93896c 100644 --- a/src/H5Gobj.c +++ b/src/H5Gobj.c @@ -222,7 +222,7 @@ H5G__obj_create_real(H5F_t *f, const H5O_ginfo_t *ginfo, size_t pline_size = 0; /* Size of the pipeline message */ size_t link_size; /* Size of a link message */ - /* Calculate message size infomation, for creating group's object header */ + /* Calculate message size information, for creating group's object header */ linfo_size = H5O_msg_size_f(f, gcpl_id, H5O_LINFO_ID, linfo, (size_t)0); HDassert(linfo_size); diff --git a/src/H5HFcache.c b/src/H5HFcache.c index 84d5c5f..0c5d3aa 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -702,12 +702,12 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len, * * Do this with a call to H5HF__cache_verify_hdr_descendants_clean(). * - * Note that decendants need not be clean if the pre_serialize call + * Note that descendants need not be clean if the pre_serialize call * is made during a cache serialization instead of an entry or cache * flush. * * Note also that with the recent change in the definition of flush - * dependency, not all decendants need be clean -- only direct flush + * dependency, not all descendants need be clean -- only direct flush * dependency children. * * Finally, observe that the H5HF__cache_verify_hdr_descendants_clean() @@ -2643,11 +2643,11 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. 
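The revised flush-dependency rule repeated in the H5HFcache.c comments above can be restated in code form. The sketch below uses assumed types, not the actual H5C entry structure: a parent is flushable exactly when all of its immediate flush dependency children are clean.

    #include <stdbool.h>
    #include <stddef.h>

    /* Sketch only: assumed types, not H5C_cache_entry_t. */
    typedef struct fd_entry_t {
        bool                is_dirty;
        size_t              n_children; /* immediate flush dependency children */
        struct fd_entry_t **children;
    } fd_entry_t;

    /* Under the revised rule, only the immediate children are consulted;
     * dirty entries further down the dependency graph do not block a flush. */
    static bool fd_parent_flushable(const fd_entry_t *parent)
    {
        size_t u;

        for(u = 0; u < parent->n_children; u++)
            if(parent->children[u]->is_dirty)
                return false;
        return true;
    }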
@@ -2815,7 +2815,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, H5_BEGIN_TAG(hdr->heap_addr) if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -2889,7 +2889,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, H5_BEGIN_TAG(hdr->heap_addr) if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -2912,7 +2912,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, if(unprotect_root_iblock) { HDassert(root_iblock); if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.") } /* end if */ } /* end else */ } /* end if */ @@ -2982,7 +2982,7 @@ done: * Function: H5HF__cache_verify_iblock_descendants_clean * * Purpose: Sanity checking routine that verifies that all indirect - * and direct blocks that are decendents of the supplied + * and direct blocks that are descendants of the supplied * instance of H5HF_indirect_t are clean. Set *clean * to TRUE if this is the case, and to FALSE otherwise. * @@ -3009,11 +3009,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. @@ -3126,11 +3126,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. @@ -3290,11 +3290,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. 
@@ -3468,7 +3468,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5_BEGIN_TAG(iblock->hdr->heap_addr) if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -3480,7 +3480,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, /* pointer to the entry. This is very slimy -- */ /* come up with a better solution. */ if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.") HDassert(child_iblock); } /* end else */ } /* end if */ @@ -3519,7 +3519,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, /* if we protected the child iblock, unprotect it now */ if(unprotect_child_iblock) { if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.") } /* end if */ } /* end if */ } /* end if */ diff --git a/src/H5HFdtable.c b/src/H5HFdtable.c index 563e8c5..6e9429a 100644 --- a/src/H5HFdtable.c +++ b/src/H5HFdtable.c @@ -338,7 +338,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect /* Check for multi-row span */ if(start_row != end_row) { - /* Accomodate partial starting row */ + /* Accommodate partial starting row */ if(start_col > 0) { acc_span_size = dtable->row_block_size[start_row] * (dtable->cparam.width - start_col); @@ -352,7 +352,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect start_row++; } /* end while */ - /* Accomodate partial ending row */ + /* Accommodate partial ending row */ acc_span_size += dtable->row_block_size[start_row] * (end_col + 1); } /* end if */ diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c index b1b8574..8166d88 100644 --- a/src/H5HFhdr.c +++ b/src/H5HFhdr.c @@ -46,13 +46,13 @@ /* Limit on the size of the max. 
direct block size */ /* (This is limited to 32-bits currently, because I think it's unlikely to * need to be larger, the 32-bit limit for H5VM_log2_of2(n), and - * some offsets/sizes are encoded with a maxiumum of 32-bits - QAK) + * some offsets/sizes are encoded with a maximum of 32-bits - QAK) */ #define H5HF_MAX_DIRECT_SIZE_LIMIT ((hsize_t)2 * 1024 * 1024 * 1024) /* Limit on the width of the doubling table */ /* (This is limited to 16-bits currently, because I think it's unlikely to - * need to be larger, and its encoded with a maxiumum of 16-bits - QAK) + * need to be larger, and it's encoded with a maximum of 16-bits - QAK) */ #define H5HF_WIDTH_LIMIT (64 * 1024) #endif /* NDEBUG */ diff --git a/src/H5HFspace.c b/src/H5HFspace.c index 5d659a6..5260ae2 100644 --- a/src/H5HFspace.c +++ b/src/H5HFspace.c @@ -311,7 +311,7 @@ H5HF__space_revert_root(const H5HF_hdr_t *hdr) /* Only need to scan the sections if the free space has been initialized */ if(hdr->fspace) - /* Iterate over all sections, reseting the parent pointers in 'single' sections */ + /* Iterate over all sections, resetting the parent pointers in 'single' sections */ if(H5FS_sect_iterate(hdr->f, hdr->fspace, H5HF_space_revert_root_cb, NULL) < 0) HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over sections to reset parent pointers") diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c index 79462e9..5cf1c08 100644 --- a/src/H5HFtiny.c +++ b/src/H5HFtiny.c @@ -109,7 +109,7 @@ H5HF_tiny_init(H5HF_hdr_t *hdr) /* Check if tiny objects need an extra byte for their length */ /* (account for boundary condition when length of an object would need an * extra byte, but using that byte means that the extra length byte is - * unneccessary) + * unnecessary) */ if((hdr->id_len - 1) <= H5HF_TINY_LEN_SHORT) { hdr->tiny_max_len = hdr->id_len - 1; diff --git a/src/H5HGcache.c b/src/H5HGcache.c index 50b2669..3a770ae 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -300,7 +300,7 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, image += heap->obj[0].size; } /* end if */ else { - size_t need; + size_t need = 0; unsigned idx; uint8_t *begin = image; @@ -420,7 +420,7 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len) * * Purpose: Given an appropriately sized buffer and an instance of * H5HG_heap_t, serialize the global heap for writing to file, - * and copy the serialized verion into the buffer. + * and copy the serialized version into the buffer. * * * Return: Success: SUCCEED diff --git a/src/H5MF.c b/src/H5MF.c index 64e91c8..2f60080 100644 --- a/src/H5MF.c +++ b/src/H5MF.c @@ -1692,7 +1692,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); } /* end if */ /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -1788,7 +1788,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); HDassert(f->shared->sblock); /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -1956,7 +1956,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); HDassert(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2); /* Set the ring type in the API context.
In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2244,7 +2244,7 @@ H5MF_get_freespace(H5F_t *f, hsize_t *tot_space, hsize_t *meta_size) HDassert(f->shared->lf); /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2419,7 +2419,7 @@ H5MF_get_free_sections(H5F_t *f, H5FD_mem_t type, size_t nsects, H5F_sect_info_t sect_udata.sect_idx = 0; /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2712,7 +2712,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators") /* Set the ring type in the DXPL. In most cases, we will - * need H5AC_RING_MDFSM first, so initialy set the ring in + * need H5AC_RING_MDFSM first, so initially set the ring in * the DXPL to that value. We will alter this later if * needed. */ @@ -2829,7 +2829,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled) * extension messages will choke if the target message is * unexpectedly either absent or present. * - * Update: This is probably unecessary, as I gather that the + * Update: This is probably unnecessary, as I gather that the * file space manager info message is guaranteed to exist. * Leave it in for now, but consider removing it. */ @@ -3083,7 +3083,7 @@ done: * of these free space manager(s) only to have the allocation * result in the free space manager being empty and thus * obliging us to free the space again. Thus there is the - * potential for an infinte loop if we want to avoid saving + * potential for an infinite loop if we want to avoid saving * empty free space managers. * * Similarly, it is possible that we could allocate space @@ -3280,7 +3280,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled) * extension message. * * As of this writing, we are solving this problem by - * simply not supporting persistant FSMs with the split + * simply not supporting persistent FSMs with the split * and multi file drivers. * * Current plans are to do away with the multi file @@ -3296,7 +3296,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled) /* ******************* PROBLEM: ******************** * - * If the file has an alignement other than 1, and if + * If the file has an alignment other than 1, and if * the EOA is not a multiple of this alignment, allocating sapce * for the section via the VFD info has the potential of generating * a fragment that will be added to the free space manager. This @@ -3433,7 +3433,7 @@ H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type) /* In principle, fsm_type should always be less than * H5F_MEM_PAGE_LARGE_SUPER whenever paged aggregation * is not enabled. However, since there is code that does - * not observe this prinicple, force the result to FALSE if + * not observe this principle, force the result to FALSE if * fsm_type is greater than or equal to H5F_MEM_PAGE_LARGE_SUPER. 
*/ if(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER) diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c index 5ab1834..3db7f73 100644 --- a/src/H5MFaggr.c +++ b/src/H5MFaggr.c @@ -407,7 +407,7 @@ H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, if(f->shared->feature_flags & aggr->feature_flag) { /* * If the block being tested adjoins the beginning of the aggregator - * block, check if the aggregator can accomodate the extension. + * block, check if the aggregator can accommodate the extension. */ if(H5F_addr_eq(blk_end, aggr->addr)) { haddr_t eoa; /* EOA for the file */ diff --git a/src/H5O.c b/src/H5O.c index c5db814..9b832f1 100644 --- a/src/H5O.c +++ b/src/H5O.c @@ -946,7 +946,7 @@ done: * Function: H5Odisable_mdc_flushes * * Purpose: To "cork" an object: - * --keep dirty entries assoicated with the object in the metadata cache + * --keep dirty entries associated with the object in the metadata cache * * Return: Success: Non-negative * Failure: Negative diff --git a/src/H5Oattr.c b/src/H5Oattr.c index f2c31d7..a62a3a3 100644 --- a/src/H5Oattr.c +++ b/src/H5Oattr.c @@ -249,7 +249,7 @@ done: if(NULL == ret_value) if(attr) { if(attr->shared) { - /* Free any dynamicly allocated items */ + /* Free any dynamically allocated items */ if(H5A__free(attr) < 0) HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't release attribute info") diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c index 9d125e8..e8a8433 100644 --- a/src/H5Oattribute.c +++ b/src/H5Oattribute.c @@ -1093,7 +1093,7 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/, mesg->native = NULL; /* Delete old attribute */ - /* (doesn't decrement the link count on shared components becuase + /* (doesn't decrement the link count on shared components because * the "native" pointer has been reset) */ if(H5O_release_mesg(udata->f, oh, mesg, FALSE) < 0) diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c index ad64297..591ac4a 100644 --- a/src/H5Ocache_image.c +++ b/src/H5Ocache_image.c @@ -313,7 +313,7 @@ H5O__mdci_delete(H5F_t *f, H5O_t *open_oh, void *_mesg) * space allocations / deallocations prior to the free of the * cache image. Verify this to the extent possible. * - * If the hack to work around the persistant self referential + * If the hack to work around the persistent self referential * free space manager issue is NOT in use, just call H5MF_xfree() * to release the cache iamge. In principle, we should be able * to just reduce the EOA to the base address of the cache diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index e6865f1..2e628f4 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -1106,7 +1106,7 @@ H5O__copy_header(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */, HDassert(H5F_addr_defined(oloc_src->addr)); HDassert(oloc_dst->file); - /* Intialize copy info before errors can be thrown */ + /* Initialize copy info before errors can be thrown */ HDmemset(&cpy_info, 0, sizeof(H5O_copy_t)); /* Get the copy property list */ diff --git a/src/H5Omessage.c b/src/H5Omessage.c index 930be9b..e156dcd 100644 --- a/src/H5Omessage.c +++ b/src/H5Omessage.c @@ -239,7 +239,7 @@ done: * Purpose: Modifies an existing message or creates a new message. * * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * @@ -296,7 +296,7 @@ done: * Purpose: Modifies an existing message or creates a new message. 
* * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * @@ -343,7 +343,7 @@ done: * Purpose: Modifies an existing message or creates a new message. * * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * diff --git a/src/H5Oshared.c b/src/H5Oshared.c index 047f90e..328ba4b 100644 --- a/src/H5Oshared.c +++ b/src/H5Oshared.c @@ -640,7 +640,7 @@ done: /*------------------------------------------------------------------------- * Function: H5O__shared_post_copy_file * - * Purpose: Delate a shared message and replace with a new one. + * Purpose: Delete a shared message and replace with a new one. * The function is needed at cases such as coping a shared reg_ref attribute. * When a shared reg_ref attribute is copied from one file to * another, the values in file need to be replaced. The only way diff --git a/src/H5P.c b/src/H5P.c index f163e9e..733af42 100644 --- a/src/H5P.c +++ b/src/H5P.c @@ -399,7 +399,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -581,7 +581,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. 
A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c index d69b063..cb13ff3 100644 --- a/src/H5Pdcpl.c +++ b/src/H5Pdcpl.c @@ -1332,7 +1332,7 @@ done: /*------------------------------------------------------------------------- * Function: H5P__dcrt_ext_file_list_get * - * Purpose: Copies an external file lsit property when it's retrieved from a property list + * Purpose: Copies an external file list property when it's retrieved from a property list * * Return: Success: Non-negative * Failure: Negative @@ -2245,7 +2245,7 @@ H5Pset_virtual(hid_t dcpl_id, hid_t vspace_id, const char *src_file_name, done: /* Set VDS layout information in property list */ - /* (Even on faliure, so there's not a mangled layout struct in the list) */ + /* (Even on failure, so there's not a mangled layout struct in the list) */ if(retrieved_layout) { if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &virtual_layout) < 0) { HDONE_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set layout") diff --git a/src/H5Pdeprec.c b/src/H5Pdeprec.c index 7f96333..226d206 100644 --- a/src/H5Pdeprec.c +++ b/src/H5Pdeprec.c @@ -202,7 +202,7 @@ GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -383,7 +383,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index b56c137..e6eed32 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -1035,7 +1035,7 @@ done: * for the type conversion buffer and background buffer and * optionally supply pointers to application-allocated buffers. * If the buffer size is smaller than the entire amount of data - * being transfered between application and file, and a type + * being transferred between application and file, and a type * conversion buffer or background buffer is required then * strip mining will be used. 
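Strip mining, as referenced in the H5Pdxpl.c comment above, simply processes the transfer in conversion-buffer-sized pieces. The loop below is an illustrative sketch (assumed names, conversion step elided), not the H5D internals:

    #include <string.h>

    /* Sketch: move `total` bytes through a type conversion buffer that is
     * smaller than the whole transfer, one buffer-sized strip at a time. */
    static void strip_mined_transfer(const char *src, char *dst, size_t total,
                                     char *tconv_buf, size_t buf_size)
    {
        size_t done = 0;

        while(done < total) {
            size_t strip = (total - done < buf_size) ? (total - done) : buf_size;

            memcpy(tconv_buf, src + done, strip);   /* gather one strip        */
            /* ... type conversion would run in tconv_buf here ... */
            memcpy(dst + done, tconv_buf, strip);   /* scatter converted strip */
            done += strip;
        }
    }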
* diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index eded286..7ba9c11 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -396,7 +396,7 @@ static const hbool_t H5F_def_coll_md_write_flag_g = H5F_ACS_COLL_MD_WRITE_FLAG_D static const H5AC_cache_image_config_t H5F_def_mdc_initCacheImageCfg_g = H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF; /* Default metadata cache image settings */ static const size_t H5F_def_page_buf_size_g = H5F_ACS_PAGE_BUFFER_SIZE_DEF; /* Default page buffer size */ static const unsigned H5F_def_page_buf_min_meta_perc_g = H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEF; /* Default page buffer minimum metadata size */ -static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minumum raw data size */ +static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minimum raw data size */ /*------------------------------------------------------------------------- @@ -1713,7 +1713,7 @@ H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown image config version.") /* If we ever support multiple versions of H5AC_cache_config_t, we - * will have to get the cannonical version here, and then translate + * will have to get the canonical version here, and then translate * to the version of the structure supplied. */ @@ -1808,7 +1808,7 @@ H5Pget_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.") /* If we ever support multiple versions of H5AC_cache_config_t, we - * will have to get the cannonical version here, and then translate + * will have to get the canonical version here, and then translate * to the version of the structure supplied. */ @@ -2322,7 +2322,7 @@ done: * necessary to represent features used.
* (This is the "make certain to take advantage of * in the file format" use case (maybe is smaller - * or scales better than an ealier version, which would otherwise + * or scales better than an earlier version, which would otherwise * be used)) * * LOW = H5F_FORMAT_1_2_0, HIGH = H5F_FORMAT_1_6_0 => creates objects @@ -2549,7 +2549,7 @@ H5Pset_file_image(hid_t fapl_id, void *buf_ptr, size_t buf_len) /* validate parameters */ if(!(((buf_ptr == NULL) && (buf_len == 0)) || ((buf_ptr != NULL) && (buf_len > 0)))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistant buf_ptr and buf_len") + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistent buf_ptr and buf_len") /* Get the plist structure */ if(NULL == (fapl = H5P_object_verify(fapl_id, H5P_FILE_ACCESS))) @@ -2652,7 +2652,7 @@ H5Pget_file_image(hid_t fapl_id, void **buf_ptr_ptr, size_t *buf_len_ptr) if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &image_info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((image_info.buffer != NULL) && (image_info.size > 0)) || ((image_info.buffer == NULL) && (image_info.size == 0))); @@ -2728,7 +2728,7 @@ H5Pset_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get old file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info.buffer != NULL) && (info.size > 0)) || ((info.buffer == NULL) && (info.size == 0))); @@ -2804,7 +2804,7 @@ H5Pget_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info.buffer != NULL) && (info.size > 0)) || ((info.buffer == NULL) && (info.size == 0))); @@ -2858,7 +2858,7 @@ H5P__file_image_info_copy(void *value) info = (H5FD_file_image_info_t *)value; - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info->buffer != NULL) && (info->size > 0)) || ((info->buffer == NULL) && (info->size == 0))); @@ -2932,7 +2932,7 @@ H5P__file_image_info_free(void *value) info = (H5FD_file_image_info_t *)value; - /* Verify file image field consistancy */ + /* Verify file image field consistency */ HDassert(((info->buffer != NULL) && (info->size > 0)) || ((info->buffer == NULL) && (info->size == 0))); diff --git a/src/H5Pfcpl.c b/src/H5Pfcpl.c index 6b0d2c0..f90dae5 100644 --- a/src/H5Pfcpl.c +++ b/src/H5Pfcpl.c @@ -545,7 +545,7 @@ H5Pset_sym_k(hid_t plist_id, unsigned ik, unsigned lk) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "istore IK value exceeds maximum B-tree entries"); if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); btree_k[H5B_SNODE_ID] = ik; if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree nodes"); @@ -650,10 +650,10 @@ H5Pset_istore_k(hid_t plist_id, unsigned ik) /* Set value */ if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl 
nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); btree_k[H5B_CHUNK_ID] = ik; if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree internal nodes"); done: FUNC_LEAVE_API(ret_value) @@ -698,7 +698,7 @@ H5Pget_istore_k(hid_t plist_id, unsigned *ik /*out */ ) /* Get value */ if(ik) { if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); *ik = btree_k[H5B_CHUNK_ID]; } /* end if */ diff --git a/src/H5Pint.c b/src/H5Pint.c index 0e78463..6a0cc14 100644 --- a/src/H5Pint.c +++ b/src/H5Pint.c @@ -1398,7 +1398,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * NAME H5P_access_class PURPOSE - Internal routine to increment or decrement list & class dependancies on a + Internal routine to increment or decrement list & class dependencies on a property list class USAGE herr_t H5P_access_class(pclass,mod) @@ -1407,7 +1407,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * RETURNS Returns non-negative on success, negative on failure. DESCRIPTION - Increment/Decrement the class or list dependancies for a given class. + Increment/Decrement the class or list dependencies for a given class. This routine is the final arbiter on decisions about actually releasing a class in memory, such action is only taken when the reference counts for both dependent classes & lists reach zero. @@ -1425,19 +1425,19 @@ H5P_access_class(H5P_genclass_t *pclass, H5P_class_mod_t mod) HDassert(mod > H5P_MOD_ERR && mod < H5P_MOD_MAX); switch(mod) { - case H5P_MOD_INC_CLS: /* Increment the dependant class count*/ + case H5P_MOD_INC_CLS: /* Increment the dependent class count*/ pclass->classes++; break; - case H5P_MOD_DEC_CLS: /* Decrement the dependant class count*/ + case H5P_MOD_DEC_CLS: /* Decrement the dependent class count*/ pclass->classes--; break; - case H5P_MOD_INC_LST: /* Increment the dependant list count*/ + case H5P_MOD_INC_LST: /* Increment the dependent list count*/ pclass->plists++; break; - case H5P_MOD_DEC_LST: /* Decrement the dependant list count*/ + case H5P_MOD_DEC_LST: /* Decrement the dependent list count*/ pclass->plists--; break; @@ -1586,12 +1586,12 @@ H5P_create_class(H5P_genclass_t *par_class, const char *name, H5P_plist_type_t t /* Allocate room for the class */ if(NULL == (pclass = H5FL_CALLOC(H5P_genclass_t))) - HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class allocation failed") + HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class allocation failed") /* Set class state */ pclass->parent = par_class; if(NULL == (pclass->name = H5MM_xstrdup(name))) - HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class name allocation failed") + HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class name allocation failed") pclass->type = type; pclass->nprops = 0; /* Classes are created without properties initially */ pclass->plists = 0; /* No properties lists of this class yet */ @@ -2026,7 +2026,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or 
may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2258,7 +2258,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2499,7 +2499,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2636,7 +2636,7 @@ H5P__do_prop(H5P_genplist_t *plist, const char *name, H5P_do_plist_op_t plist_op /* Find property in changed list */ if(NULL != (prop = (H5P_genprop_t *)H5SL_search(plist->props, name))) { - /* Call the 'found in propery list' callback */ + /* Call the 'found in property list' callback */ if((*plist_op)(plist, name, prop, udata) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTOPERATE, FAIL, "can't operate on property") } /* end if */ @@ -4020,7 +4020,7 @@ H5P_iterate_plist(const H5P_genplist_t *plist, hbool_t iter_all_prop, int *idx, /* Check for iterating over all properties, or just non-default ones */ if(iter_all_prop) { - /* Walk up the class hiearchy */ + /* Walk up the class hierarchy */ tclass = plist->pclass; while(tclass != NULL) { /* Iterate over properties in property list class */ @@ -4737,7 +4737,7 @@ H5P_copy_prop_pclass(hid_t dst_id, hid_t src_id, const char *name) /* Sanity check */ HDassert(name); - /* Get propery list classes */ + /* Get property list classes */ if(NULL == (src_pclass = (H5P_genclass_t *)H5I_object(src_id))) HGOTO_ERROR(H5E_PLIST, H5E_NOTFOUND, FAIL, "source property class object doesn't exist") if(NULL == (dst_pclass = (H5P_genclass_t *)H5I_object(dst_id))) @@ -4854,8 +4854,8 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The property list class 'close' callback routine is not called from - here, it must have been check for and called properly prior to this routine - being called + here, it must have been checked for and called properly prior to this routine + being called. EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ @@ -4981,7 +4981,7 @@ H5P_close(void *_plist) tclass=tclass->parent; } /* end while */ - /* Decrement class's dependant property list value! */ + /* Decrement class's dependent property list value! 
*/ if(H5P_access_class(plist->pclass,H5P_MOD_DEC_LST) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "Can't decrement class ref count") diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h index 13463ae..60b2363 100644 --- a/src/H5Ppkg.h +++ b/src/H5Ppkg.h @@ -51,10 +51,10 @@ typedef enum { /* Define enum for modifications to class */ typedef enum { H5P_MOD_ERR=(-1), /* Indicate an error */ - H5P_MOD_INC_CLS, /* Increment the dependant class count*/ - H5P_MOD_DEC_CLS, /* Decrement the dependant class count*/ - H5P_MOD_INC_LST, /* Increment the dependant list count*/ - H5P_MOD_DEC_LST, /* Decrement the dependant list count*/ + H5P_MOD_INC_CLS, /* Increment the dependent class count*/ + H5P_MOD_DEC_CLS, /* Decrement the dependent class count*/ + H5P_MOD_INC_LST, /* Increment the dependent list count*/ + H5P_MOD_DEC_LST, /* Decrement the dependent list count*/ H5P_MOD_INC_REF, /* Increment the ID reference count*/ H5P_MOD_DEC_REF, /* Decrement the ID reference count*/ H5P_MOD_MAX /* Upper limit on class modifications */ @@ -89,7 +89,7 @@ struct H5P_genclass_t { size_t nprops; /* Number of properties in class */ unsigned plists; /* Number of property lists that have been created since the last modification to the class */ unsigned classes; /* Number of classes that have been derived since the last modification to the class */ - unsigned ref_count; /* Number of oustanding ID's open on this class object */ + unsigned ref_count; /* Number of outstanding ID's open on this class object */ hbool_t deleted; /* Whether this class has been deleted and is waiting for dependent classes & proplists to close */ unsigned revision; /* Revision number of a particular class (global) */ H5SL_t *props; /* Skip list containing properties */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a2c0418..58f3d18 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -139,7 +139,7 @@ typedef enum H5D_mpio_actual_chunk_opt_mode_t { typedef enum H5D_mpio_actual_io_mode_t { /* The following four values are conveniently defined as a bit field so that - * we can switch from the default to indpendent or collective and then to + * we can switch from the default to independent or collective and then to * mixed without having to check the original value. * * NO_COLLECTIVE means that either collective I/O wasn't requested or that diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index a9a4fd9..bb458a7 100644 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -96,7 +96,7 @@ #define H5SM_B2_SPLIT_PERCENT 100 #define H5SM_B2_MERGE_PERCENT 40 -#define H5SM_LIST_VERSION 0 /* Verion of Shared Object Header Message List Indexes */ +#define H5SM_LIST_VERSION 0 /* Version of Shared Object Header Message List Indexes */ /****************************/ /* Package Typedefs */ diff --git a/src/H5Sall.c b/src/H5Sall.c index 11dd8e1..28395b1 100644 --- a/src/H5Sall.c +++ b/src/H5Sall.c @@ -432,7 +432,7 @@ H5S__all_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. Offset is irrelevant for this type of selection. 
GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 47eb573..5653209 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -853,7 +853,7 @@ H5S__hyper_iter_next(H5S_sel_iter_t *iter, size_t nelem) /* Check if we are finished with the spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while(curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -1037,7 +1037,7 @@ H5S__hyper_iter_next_block(H5S_sel_iter_t *iter) /* Check if we are finished with the spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while(curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -1704,7 +1704,7 @@ done: RETURNS TRUE if the selection fits within the extent, FALSE if it does not DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -1762,7 +1762,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -2052,7 +2052,7 @@ H5S__hyper_serialize_helper(const H5S_hyper_span_info_t *spans, HDassert(rank < H5O_LAYOUT_NDIMS); HDassert(p && pp); - /* Walk through the list of spans, recursing or outputing them */ + /* Walk through the list of spans, recursing or outputting them */ curr = spans->head; while(curr != NULL) { /* Recurse if this node has down spans */ @@ -2458,7 +2458,7 @@ H5S__hyper_span_blocklist(const H5S_hyper_span_info_t *spans, hsize_t start[], HDassert(numblocks && *numblocks > 0); HDassert(buf && *buf); - /* Walk through the list of spans, recursing or outputing them */ + /* Walk through the list of spans, recursing or outputting them */ curr = spans->head; while(curr != NULL && *numblocks > 0) { /* Recurse if this node has down spans */ @@ -3509,7 +3509,7 @@ done: NAME H5S__hyper_add_span_element_helper PURPOSE - Add a single elment to a span tree + Add a single element to a span tree USAGE herr_t H5S_hyper_add_span_element_helper(prev_span, span_tree, rank, coords) H5S_hyper_span_info_t *span_tree; IN/OUT: Pointer to span tree to append to @@ -3713,7 +3713,7 @@ done: NAME H5S_hyper_add_span_element PURPOSE - Add a single elment to a span tree + Add a single element to a span tree USAGE herr_t H5S_hyper_add_span_element(space, span_tree, rank, coords) H5S_t *space; IN/OUT: Pointer to dataspace to add coordinate to @@ -8101,7 +8101,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter, /* Check if we have more spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while((unsigned)curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -8280,7 +8280,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter, break; } /* end if */ else { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while((unsigned)curr_dim < 
fast_dim) { HDassert(curr_span); HDassert(curr_span->down); diff --git a/src/H5Smpio.c b/src/H5Smpio.c index 5c65119..db81ffc 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -909,10 +909,9 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, else inner_type = outer_type; } /* end for */ -/*************************** -* End of loop, walking -* thru dimensions. -***************************/ +/****************************************** +* End of loop, walking through dimensions. +*******************************************/ /* At this point inner_type is actually the outermost type, even for 0-trip loop */ *new_type = inner_type; diff --git a/src/H5Snone.c b/src/H5Snone.c index 19d0b5f..a46a71a 100644 --- a/src/H5Snone.c +++ b/src/H5Snone.c @@ -408,7 +408,7 @@ H5S_none_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. Offset is irrelevant for this type of selection. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 442bb2a..fb8e311 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -676,7 +676,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 4462295..08ac255 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -361,7 +361,7 @@ H5S_get_select_npoints(const H5S_t *space) TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -403,7 +403,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. 
GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5T.c b/src/H5T.c index c220890..fa97506 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -309,7 +309,7 @@ static H5T_path_t *H5T__path_find_real(const H5T_t *src, const H5T_t *dst, /* Library Private Variables */ /*****************************/ -/* The native endianess of the platform */ +/* The native endianness of the platform */ H5T_order_t H5T_native_order_g = H5T_ORDER_ERROR; diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 4e4a551..1719f8f 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -385,7 +385,7 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id) if(H5T_set_version(file, type) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set version of datatype") - /* Calculate message size infomation, for creating object header */ + /* Calculate message size information, for creating object header */ dtype_size = H5O_msg_size_f(file, tcpl_id, H5O_DTYPE_ID, type, (size_t)0); HDassert(dtype_size); diff --git a/src/H5Tconv.c b/src/H5Tconv.c index f3af9e1..803a6da 100644 --- a/src/H5Tconv.c +++ b/src/H5Tconv.c @@ -109,7 +109,7 @@ * least as large as the destination. Overflows can occur when * the destination is narrower than the source. * - * xF: Integers to float-point(float or double) values where the desination + * xF: Integers to float-point(float or double) values where the destination * is at least as wide as the source. This case cannot generate * overflows. * @@ -2102,7 +2102,7 @@ H5T__conv_struct_subset(const H5T_cdata_t *cdata) * * For each element do * For I=1..NELMTS do - * If sizeof detination type <= sizeof source type then + * If sizeof destination type <= sizeof source type then * Convert member to destination type; * Move member as far left as possible; * @@ -2314,7 +2314,7 @@ done: * function. The algorithm is basically: * * For each member of the struct - * If sizeof detination type <= sizeof source type then + * If sizeof destination type <= sizeof source type then * Convert member to destination type for all elements * Move memb to BKG buffer for all elements * Else @@ -2421,7 +2421,7 @@ H5T__conv_struct_opt(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, offset -= src_memb->size; if(dst_memb->size > src->shared->size-offset) { cdata->priv = H5T_conv_struct_free(priv); - HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "convertion is unsupported by this function") + HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "conversion is unsupported by this function") } /* end if */ } /* end if */ } /* end for */ @@ -4250,7 +4250,7 @@ H5T__conv_f_f(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts, /* * The exponent is too small to fit in the exponent field, * but by shifting the mantissa to the right we can - * accomodate that value. The mantissa of course is no + * accommodate that value. The mantissa of course is no * longer normalized. */ mrsh += (size_t)(1 - expo); diff --git a/src/H5Toffset.c b/src/H5Toffset.c index 668e730..c0e94fc 100644 --- a/src/H5Toffset.c +++ b/src/H5Toffset.c @@ -33,12 +33,12 @@ static herr_t H5T_set_offset(const H5T_t *dt, size_t offset); * Function: H5Tget_offset * * Purpose: Retrieves the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. 
That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -84,12 +84,12 @@ done: * Function: H5T_get_offset * * Purpose: Retrieves the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -134,12 +134,12 @@ done: * Function: H5Tset_offset * * Purpose: Sets the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -203,12 +203,12 @@ done: * Function: H5T_set_offset * * Purpose: Sets the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. 
That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h index 96132b0..e1b996c 100644 --- a/src/H5Tpkg.h +++ b/src/H5Tpkg.h @@ -295,7 +295,7 @@ typedef enum H5T_state_t { H5T_STATE_OPEN /*named constant, open object header */ } H5T_state_t; - /* This struct is shared between all occurances of an open named type */ + /* This struct is shared between all occurrences of an open named type */ typedef struct H5T_shared_t { hsize_t fo_count; /* number of references to this file object */ H5T_state_t state; /*current state of the type */ diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h index 8051b6d..89cdcfd 100644 --- a/src/H5Tprivate.h +++ b/src/H5Tprivate.h @@ -100,7 +100,7 @@ typedef struct H5T_subset_info_t { /* Forward declarations for prototype arguments */ struct H5O_t; -/* The native endianess of the platform */ +/* The native endianness of the platform */ H5_DLLVAR H5T_order_t H5T_native_order_g; /* Private functions */ diff --git a/src/H5Znbit.c b/src/H5Znbit.c index c0afc38..fe0041c 100644 --- a/src/H5Znbit.c +++ b/src/H5Znbit.c @@ -558,7 +558,7 @@ H5Z_set_parms_array(const H5T_t *type, unsigned *cd_values_index, H5T_t *dtype_base = NULL; /* Array datatype's base datatype */ H5T_class_t dtype_base_class; /* Array datatype's base datatype's class */ size_t dtype_size; /* Array datatype's size (in bytes) */ - htri_t is_vlstring; /* flag indicating if datatype is varible-length string */ + htri_t is_vlstring; /* flag indicating if datatype is variable-length string */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT @@ -667,7 +667,7 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned *cd_values_index, size_t dtype_member_offset; /* Compound datatype's current member datatype's offset (in bytes) */ size_t dtype_next_member_offset;/* Compound datatype's next member datatype's offset (in bytes) */ size_t dtype_size; /* Compound datatype's size (in bytes) */ - htri_t is_vlstring; /* flag indicating if datatype is varible-length string */ + htri_t is_vlstring; /* flag indicating if datatype is variable-length string */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h index fcb2d37..0df05f0 100644 --- a/src/H5Zpublic.h +++ b/src/H5Zpublic.h @@ -50,7 +50,7 @@ typedef int H5Z_filter_t; * unlimited amount, but currently each * filter uses a bit in a 32-bit field, * so the format would have to be - * changed to accomodate that) + * changed to accommodate that) */ /* Flags for filter definition (stored) */ diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c index d139696..cdf31a4 100644 --- a/src/H5Zscaleoffset.c +++ b/src/H5Zscaleoffset.c @@ -988,7 +988,7 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id) if(status == H5D_FILL_VALUE_UNDEFINED) cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_UNDEFINED; else { - int need_convert = FALSE; /* Flag indicating convertion of byte order */ + int need_convert = FALSE; /* Flag indicating conversion of byte order */ cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_DEFINED; @@ -1047,7 +1047,7 @@ H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_value uint32_t minbits = 0; /* minimum number of bits to store values 
*/ unsigned long long minval= 0; /* minimum value of input buffer */ enum H5Z_scaleoffset_t type; /* memory type corresponding to dataset datatype */ - int need_convert = FALSE; /* flag indicating convertion of byte order */ + int need_convert = FALSE; /* flag indicating conversion of byte order */ unsigned char *outbuf = NULL; /* pointer to new output buffer */ unsigned buf_offset = 21; /* buffer offset because of parameters stored in file */ unsigned i; /* index */ diff --git a/src/H5Ztrans.c b/src/H5Ztrans.c index d4b59a6..67646a0 100644 --- a/src/H5Ztrans.c +++ b/src/H5Ztrans.c @@ -329,7 +329,7 @@ static void H5Z_print(H5Z_node *tree, FILE *stream); } /* The difference of this macro from H5Z_XFORM_DO_OP3 is that it handles the operations when the left operand is empty, like -x or +x. - * The reason that it's seperated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op, + * The reason that it's separated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op, * these two macros are called in different ways. (SLU 2012/3/20) */ #define H5Z_XFORM_DO_OP6(OP) \ @@ -967,7 +967,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Z_new_node - * Purpose: Create and initilize a new H5Z_node structure. + * Purpose: Create and initialize a new H5Z_node structure. * Return: Success: Valid H5Z_node ptr * NULLure: NULL * Programmer: Bill Wendling @@ -999,7 +999,7 @@ done: * Purpose: If the transform is trivial, this function applies it. * Otherwise, it calls H5Z_xform_eval_full to do the full * transform. - * Return: SUCCEED if transform applied succesfully, FAIL otherwise + * Return: SUCCEED if transform applied successfully, FAIL otherwise * Programmer: Leon Arber * 5/1/04 * Modifications: @@ -1485,7 +1485,7 @@ H5Z_xform_reduce_tree(H5Z_node* tree) * Purpose: If the root of the tree passed in points to a simple * arithmetic operation and the left and right subtrees are both * integer or floating point values, this function does that - * operation, free the left and rigt subtrees, and replaces + * operation, free the left and right subtrees, and replaces * the root with the result of the operation. * Return: None. * Programmer: Leon Arber diff --git a/src/H5detect.c b/src/H5detect.c index 1c5554e..6ad45aa 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -43,7 +43,7 @@ static const char *FileHeader = "\n\ * system or configure has detected those Unix * features which aren't available. We're not * running on a Vax or other machine with mixed - * endianess. + * endianness. * * Modifications: * @@ -695,7 +695,7 @@ H5T__init_native(void)\n\ FUNC_ENTER_PACKAGE\n"); for(i = 0; i < nd; i++) { - /* The native endianess of this machine */ + /* The native endianness of this machine */ /* The INFO.perm now contains `-1' for bytes that aren't used and * are always zero. This happens on the Cray for `short' where * sizeof(short) is 8, but only the low-order 4 bytes are ever used. @@ -1079,8 +1079,8 @@ fix_order(int n, int last, int *perm, const char **mesg) } else { /* * Bi-endian machines like VAX. - * (NOTE: This is not an actual determination of the VAX-endianess. - * It could have some other endianess and fall into this + * (NOTE: This is not an actual determination of the VAX-endianness. 
+ * It could have some other endianness and fall into this * case - JKM & QAK) */ HDassert(0 == n % 2); @@ -1111,7 +1111,7 @@ fix_order(int n, int last, int *perm, const char **mesg) * * This function assumes that the exponent occupies higher * order bits than the mantissa and that the most significant - * bit of the mantissa is next to the least signficant bit + * bit of the mantissa is next to the least significant bit * of the exponent. * * @@ -1271,7 +1271,7 @@ mark. Bits of integer types are printed as\n\ If the most significant bit of the normalized\n\ mantissa (always a `1' except for `0.0') is\n\ not stored then an `implicit=yes' appears\n\ -under the field description. In thie case,\n\ +under the field description. In this case,\n\ the radix point is still assumed to be\n\ before the first `M' but after the implicit\n\ bit.\n"; diff --git a/src/H5private.h b/src/H5private.h index 6334f39..8974e46 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -116,7 +116,7 @@ #endif /* - * flock() in sys/file.h is used for the implemention of file locking. + * flock() in sys/file.h is used for the implementation of file locking. */ #if defined(H5_HAVE_FLOCK) && defined(H5_HAVE_SYS_FILE_H) # include @@ -132,7 +132,7 @@ /* * Unix ioctls. These are used by h5ls (and perhaps others) to determine a - * resonable output width. + * reasonable output width. */ #ifdef H5_HAVE_SYS_IOCTL_H # include diff --git a/src/H5public.h b/src/H5public.h index 9157366..a4c80cd 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -115,7 +115,7 @@ extern "C" { /* * Status return values. Failed integer functions in HDF5 result almost * always in a negative value (unsigned failing functions sometimes return - * zero for failure) while successfull return is non-negative (often zero). + * zero for failure) while successful return is non-negative (often zero). * The negative failure value is most commonly -1, but don't bet on it. The * proper way to detect failure is something like: * @@ -298,7 +298,7 @@ typedef enum { } H5_iter_order_t; /* Iteration callback values */ -/* (Actually, any postive value will cause the iterator to stop and pass back +/* (Actually, any positive value will cause the iterator to stop and pass back * that positive value to the function that called the iterator) */ #define H5_ITER_ERROR (-1) diff --git a/src/H5system.c b/src/H5system.c index 719b7e0..186d8fa 100644 --- a/src/H5system.c +++ b/src/H5system.c @@ -294,13 +294,13 @@ HDfprintf(FILE *stream, const char *fmt, ...) unsigned short x = (unsigned short)va_arg(ap, unsigned int); n = fprintf(stream, format_templ, x); } else if(!*modifier) { - unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occuring */ + unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } else if(!HDstrcmp(modifier, "l")) { - unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occuring */ + unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } else { - uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occuring */ + uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } break; @@ -333,7 +333,7 @@ HDfprintf(FILE *stream, const char *fmt, ...) 
case 'a': { - haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occuring */ + haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occurring */ if(H5F_addr_defined(x)) { len = 0; @@ -387,7 +387,7 @@ HDfprintf(FILE *stream, const char *fmt, ...) case 's': case 'p': { - char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occuring */ + char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occurring */ n = fprintf(stream, format_templ, x); } break; diff --git a/test/cache_common.c b/test/cache_common.c index 5596601..2cc188e 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -6413,7 +6413,7 @@ validate_mdc_config(hid_t file_id, * the supplied external configuration. * * The cache also sets the initial_size field to the current - * cache max size instead of the value initialy supplied. + * cache max size instead of the value initially supplied. * Depending on circumstances, this may or may not match * the original. Hence the compare_init parameter. */ diff --git a/test/cache_common.h b/test/cache_common.h index 8999e44..9c66357 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -180,7 +180,7 @@ typedef struct flush_op hbool_t flag; /* boolean flag passed into the * function implementing the flush * operation. The meaning of the - * flag is dependant upon the flush + * flag is dependent upon the flush * operation: * * FLUSH_OP__DIRTY: TRUE iff the diff --git a/test/cache_image.c b/test/cache_image.c index 58b0b8f..5967ecc 100644 --- a/test/cache_image.c +++ b/test/cache_image.c @@ -511,7 +511,7 @@ delete_datasets(hid_t file_id, int min_dset, int max_dset) * FAPL entry when opening the file, and verify that the * metadata cache is notified. * - * If config_fsm is TRUE, setup the persistant free space + * If config_fsm is TRUE, setup the persistent free space * manager. Note that this flag may only be set if * create_file is also TRUE. * @@ -654,7 +654,7 @@ open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected, if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); - /* setup the persistant free space manager if indicated */ + /* setup the persistent free space manager if indicated */ if ( ( pass ) && ( config_fsm ) ) { fcpl_id = H5Pcreate(H5P_FILE_CREATE); @@ -5149,9 +5149,9 @@ cache_image_smoke_check_5(void) * 13) Close the file. * * 14) Get the size of the file. Verify that it is less - * than 20 KB. Without deletions and persistant free + * than 20 KB. Without deletions and persistent free * space managers, size size is about 167 MB, so this - * is sufficient to verify that the persistant free + * is sufficient to verify that the persistent free * space managers are more or less doing their job. * * Note that in the absence of paged allocation, file @@ -5473,9 +5473,9 @@ cache_image_smoke_check_6(void) /* 14) Get the size of the file. Verify that it is less - * than 20 KB. Without deletions and persistant free + * than 20 KB. Without deletions and persistent free * space managers, size size is about 167 MB, so this - * is sufficient to verify that the persistant free + * is sufficient to verify that the persistent free * space managers are more or less doing their job. * * Note that in the absence of paged allocation, file @@ -7160,7 +7160,7 @@ cache_image_api_error_check_4(void) * called before any activity on the metadata cache. * This is a potential problem, as satisfying the * H5Fget_free_sections() call requires access to all - * free space managers. 
When persistant free space + * free space managers. When persistent free space * managers are enabled, this will require calling * H5MF_tidy_self_referential_fsm_hack(). This is a * non issue in the absence of a cache image. However, @@ -7179,7 +7179,7 @@ cache_image_api_error_check_4(void) * The test is set up as follows: * * 1) Create a HDF5 file with a cache image requested - * and persistant free space managers enabled. + * and persistent free space managers enabled. * * 2) Create some data sets, and then delete some of * of those near the beginning of the file. @@ -7273,7 +7273,7 @@ get_free_sections_test(void) /* 1) Create a HDF5 file with a cache image requested - * and persistant free space managers enabled. + * and persistent free space managers enabled. */ if ( pass ) { @@ -7666,13 +7666,13 @@ get_free_sections_test(void) * the dirty metadata, the second scan will fail, as valid * versions of the dirty metadata will not be available. * - * To make the test more useful, enable persistant free + * To make the test more useful, enable persistent free * space managers. * * The test is set up as follows: * * 1) Create a HDF5 file without a cache image requested - * and persistant free space managers enabled. + * and persistent free space managers enabled. * * 2) Create some data sets and verify them. * @@ -7753,7 +7753,7 @@ evict_on_close_test(void) /* 1) Create a HDF5 file without a cache image requested - * and persistant free space managers enabled. + * and persistent free space managers enabled. */ if ( pass ) { diff --git a/test/cache_tagging.c b/test/cache_tagging.c index d34cad3..119ba62 100644 --- a/test/cache_tagging.c +++ b/test/cache_tagging.c @@ -1514,7 +1514,7 @@ check_attribute_rename_tags(hid_t fcpl, int type) haddr_t g_tag = 0; hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */ - hbool_t persistant_fsms = FALSE; + hbool_t persistent_fsms = FALSE; /* Testing Macro */ TESTING("tag application during attribute renaming"); @@ -1523,8 +1523,8 @@ check_attribute_rename_tags(hid_t fcpl, int type) /* Setup */ /* ===== */ - /* check to see if the FCPL specified persistant free space managers */ - if(H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0) + /* check to see if the FCPL specified persistent free space managers */ + if(H5Pget_file_space_strategy(fcpl, NULL, &persistent_fsms, NULL) < 0) TEST_ERROR; /* Allocate array */ @@ -1621,14 +1621,14 @@ check_attribute_rename_tags(hid_t fcpl, int type) */ if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR; - /* If the free space managers are persistant, the + /* If the free space managers are persistent, the * H5MF_tidy_self_referential_fsm_hack() must have been run. * Since this function floats all self referential free space * managers, the H5FD_MEM_SUPER FSM will not be in the metadata * cache. 
*/ - if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR; - if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR; + if(!persistent_fsms && verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR; + if(!persistent_fsms && verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR; /* verify btree header and leaf node belonging to group */ if ( verify_tag(fid, H5AC_BT2_HDR_ID, g_tag) < 0 ) TEST_ERROR; @@ -1693,7 +1693,7 @@ check_attribute_delete_tags(hid_t fcpl, int type) haddr_t g_tag = 0; hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */ - hbool_t persistant_fsms = FALSE; + hbool_t persistent_fsms = FALSE; /* Testing Macro */ TESTING("tag application during attribute delete"); @@ -1702,8 +1702,8 @@ check_attribute_delete_tags(hid_t fcpl, int type) /* Setup */ /* ===== */ - /* check to see if the FCPL specified persistant free space managers */ - if ( H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0 ) + /* check to see if the FCPL specified persistent free space managers */ + if ( H5Pget_file_space_strategy(fcpl, NULL, &persistent_fsms, NULL) < 0 ) TEST_ERROR; /* Allocate array */ @@ -1781,13 +1781,13 @@ check_attribute_delete_tags(hid_t fcpl, int type) TEST_ERROR; #if 0 - /* If the free space managers are persistant, the + /* If the free space managers are persistent, the * H5MF_tidy_self_referential_fsm_hack() must have been run. * Since this function floats all self referential free space * managers, the H5FD_MEM_SUPER FSM will not be in the metadata * cache. */ - if ( ( ! persistant_fsms ) && + if ( ( ! persistent_fsms ) && ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) ) TEST_ERROR; #endif diff --git a/test/dtransform.c b/test/dtransform.c index 0381bb8..f022699 100644 --- a/test/dtransform.c +++ b/test/dtransform.c @@ -744,7 +744,7 @@ test_getset(const hid_t dxpl_id_c_to_f) HDfree(ptrgetTest); ptrgetTest = NULL; - TESTING("data transform, read after reseting of transform property") + TESTING("data transform, read after resetting of transform property") if(H5Pset_data_transform(dxpl_id_c_to_f, simple) < 0) TEST_ERROR diff --git a/test/enum.c b/test/enum.c index 4e20713..588e9b3 100644 --- a/test/enum.c +++ b/test/enum.c @@ -374,7 +374,7 @@ test_tr2(hid_t file) E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED}; c_e1 data2[10]; - TESTING("O(log N) converions"); + TESTING("O(log N) conversions"); if((cwg = H5Gcreate2(file, "test_tr2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR diff --git a/test/mf.c b/test/mf.c index 9203804..04b7c0b 100644 --- a/test/mf.c +++ b/test/mf.c @@ -6114,21 +6114,21 @@ test_mf_fs_persist_split(void) TESTING("File's free-space managers are persistent for split-file"); - /* for now, we don't support persistant free space managers + /* for now, we don't support persistent free space managers * with the split file driver. */ SKIPPED(); - HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n"); + HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n"); return 0; /* <========== note return */ /* File creation property list template */ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) - /* for now, we don't support persistant free space managers + /* for now, we don't support persistent free space managers * with the split file driver. 
*/ SKIPPED(); - HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n"); + HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n"); return 0; /* <========== note return */ /* File creation property list template */ @@ -6436,18 +6436,18 @@ test_mf_fs_persist_multi(void) TESTING("File's free-space managers are persistent for multi-file"); - /* for now, we don't support persistant free space managers + /* for now, we don't support persistent free space managers * with the multi file driver. */ SKIPPED(); - HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n"); + HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n"); return 0; /* <========== note return */ - /* for now, we don't support persistant free space managers + /* for now, we don't support persistent free space managers * with the multi file driver. */ SKIPPED(); - HDfprintf(stdout, " Persistant FSMs disabled in multi file driver.\n"); + HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n"); return 0; /* <========== note return */ /* File creation property list template */ diff --git a/test/mtime.c b/test/mtime.c index 2cd2d31..80730eb 100644 --- a/test/mtime.c +++ b/test/mtime.c @@ -139,7 +139,7 @@ main(void) if(oi1.ctime != MTIME1) { H5_FAILED(); /* If this fails, examine H5Omtime.c. Modification time is very - * system dependant (e.g., on Windows DST must be hardcoded). */ + * system dependent (e.g., on Windows DST must be hardcoded). */ puts(" Old modification time incorrect"); goto error; } diff --git a/test/page_buffer.c b/test/page_buffer.c index a6e85ee..3c61ab0 100644 --- a/test/page_buffer.c +++ b/test/page_buffer.c @@ -619,7 +619,7 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr) if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements))) FAIL_STACK_ERROR; - /* intialize all the elements to have a value of -1 */ + /* initialize all the elements to have a value of -1 */ for(i=0 ; i Date: Sun, 15 Jul 2018 23:31:09 -0500 Subject: Fixed division-by-zero issues Description: Fixed HDFFV-10481 and HDFFV-10477, division by 0. Fixed another occurrence besides those reported. Also changed a local variable to avoid an unnecessary cast.
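The guard added at each site follows the same pattern: validate the chunk dimension before it is used as a divisor. A minimal sketch of that pattern, using the names from the H5Dbtree.c hunk below (error handling abbreviated to the HGOTO_ERROR macro used throughout the library):

    for(u = 0; u < layout->ndims; u++) {
        /* Reject a zero chunk dimension before it can be used as a divisor */
        if(layout->dim[u] == 0)
            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u", u)

        /* Retrieve the coordinate offset from the raw buffer */
        UINT64DECODE(raw, tmp_offset);
        HDassert(0 == (tmp_offset % layout->dim[u]));

        /* The division below is now safe: convert to a scaled offset */
        key->scaled[u] = tmp_offset / layout->dim[u];
    } /* end for */
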
Platforms tested: Linux/64 (jelly) Linux/32 (jam) Darwin (osx1010test) --- src/H5Dbtree.c | 23 +++++++++++++++-------- src/H5Dchunk.c | 3 +++ src/H5Dint.c | 37 +++++++++++++++++++++---------------- 3 files changed, 39 insertions(+), 24 deletions(-) diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index c23f089..04f5419 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -666,12 +666,13 @@ done: static herr_t H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key) { - const H5O_layout_chunk_t *layout; /* Chunk layout description */ - H5D_btree_key_t *key = (H5D_btree_key_t *) _key; /* Pointer to decoded key */ - hsize_t tmp_offset; /* Temporary coordinate offset, from file */ - unsigned u; /* Local index variable */ + const H5O_layout_chunk_t *layout; /* Chunk layout description */ + H5D_btree_key_t *key = (H5D_btree_key_t *) _key; /* Pointer to decoded key */ + hsize_t tmp_offset; /* Temporary coordinate offset, from file */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC_NOERR + FUNC_ENTER_STATIC /* check args */ HDassert(shared); @@ -684,16 +685,22 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key /* decode */ UINT32DECODE(raw, key->nbytes); UINT32DECODE(raw, key->filter_mask); - for(u = 0; u < layout->ndims; u++) { + for(u = 0; u < layout->ndims; u++) + { + if (layout->dim[u] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "chunk size must be > 0, dim = %u ", u) + /* Retrieve coordinate offset */ - UINT64DECODE(raw, tmp_offset); + UINT64DECODE(raw, tmp_offset); HDassert(0 == (tmp_offset % layout->dim[u])); /* Convert to a scaled offset */ key->scaled[u] = tmp_offset / layout->dim[u]; } /* end for */ - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__btree_decode_key() */ diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index e3f6410..7b8377d 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -947,6 +947,9 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id) hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ /* Initial scaled dimension sizes */ + if(dset->shared->layout.u.chunk.dim[u] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "chunk size must be > 0, dim = %u ", u) rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u]; if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) ) diff --git a/src/H5Dint.c b/src/H5Dint.c index 2f67226..0c7fee0 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -2580,10 +2580,11 @@ done: herr_t H5D__set_extent(H5D_t *dset, const hsize_t *size) { - hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */ - htri_t changed; /* Whether the dataspace changed size */ - size_t u, v; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */ + htri_t changed; /* Whether the dataspace changed size */ + size_t u, v; /* Local index variable */ + unsigned dim_idx; /* Dimension index */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_VOL_TAG(dset->oloc.addr) @@ -2620,11 +2621,11 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */ /* Determine if we are shrinking and/or expanding any dimensions */ - for(u = 0; u < (size_t)dset->shared->ndims; u++) { + for(dim_idx = 0; dim_idx < dset->shared->ndims; dim_idx++) { /* Check for various status changes */ - 
if(size[u] < curr_dims[u]) + if(size[dim_idx] < curr_dims[dim_idx]) shrink = TRUE; - if(size[u] > curr_dims[u]) + if(size[dim_idx] > curr_dims[dim_idx]) expand = TRUE; /* Chunked storage specific checks */ @@ -2632,30 +2633,34 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) hsize_t scaled; /* Scaled value */ /* Compute the scaled dimension size value */ - scaled = size[u] / dset->shared->layout.u.chunk.dim[u]; + if(dset->shared->layout.u.chunk.dim[dim_idx] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "chunk size must be > 0, dim = %u ", dim_idx) + + scaled = size[dim_idx] / dset->shared->layout.u.chunk.dim[dim_idx]; /* Check if scaled dimension size changed */ - if(scaled != dset->shared->cache.chunk.scaled_dims[u]) { + if(scaled != dset->shared->cache.chunk.scaled_dims[dim_idx]) { hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ /* Update the scaled dimension size value for the current dimension */ - dset->shared->cache.chunk.scaled_dims[u] = scaled; + dset->shared->cache.chunk.scaled_dims[dim_idx] = scaled; /* Check if algorithm for computing hash values will change */ if((scaled > dset->shared->cache.chunk.nslots && - dset->shared->cache.chunk.scaled_dims[u] <= dset->shared->cache.chunk.nslots) + dset->shared->cache.chunk.scaled_dims[dim_idx] <= dset->shared->cache.chunk.nslots) || (scaled <= dset->shared->cache.chunk.nslots && - dset->shared->cache.chunk.scaled_dims[u] > dset->shared->cache.chunk.nslots)) + dset->shared->cache.chunk.scaled_dims[dim_idx] > dset->shared->cache.chunk.nslots)) update_chunks = TRUE; if(!(scaled_power2up = H5VM_power2up(scaled))) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2") /* Check if the number of bits required to encode the scaled size value changed */ - if(dset->shared->cache.chunk.scaled_power2up[u] != scaled_power2up) { + if(dset->shared->cache.chunk.scaled_power2up[dim_idx] != scaled_power2up) { /* Update the 'power2up' & 'encode_bits' values for the current dimension */ - dset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up; - dset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up); + dset->shared->cache.chunk.scaled_power2up[dim_idx] = scaled_power2up; + dset->shared->cache.chunk.scaled_encode_bits[dim_idx] = H5VM_log2_gen(scaled_power2up); /* Indicate that the cached chunk indices need to be updated */ update_chunks = TRUE; @@ -2664,7 +2669,7 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) } /* end if */ /* Update the cached copy of the dataset's dimensions */ - dset->shared->curr_dims[u] = size[u]; + dset->shared->curr_dims[dim_idx] = size[dim_idx]; } /* end for */ /*------------------------------------------------------------------------- -- cgit v0.12 From 47780cb4e03f0b4308f28e5c8b5c5935fda73df1 Mon Sep 17 00:00:00 2001 From: Allen Byrne Date: Mon, 16 Jul 2018 13:01:53 -0500 Subject: TRILAB-31 Working locally --- MANIFEST | 3 + bin/runbkgprog | 87 ++++++++++++++++ config/cmake/UseJava.cmake | 26 +++-- test/CMakeTests.cmake | 113 +++++++++++++++----- test/ShellTests.cmake | 244 ++++++++++++++++++++++++++++++++++++++++++++ test/flushrefreshTest.cmake | 172 +++++++++++++++++++++++++++++++ 6 files changed, 605 insertions(+), 40 deletions(-) create mode 100755 bin/runbkgprog create mode 100644 test/ShellTests.cmake create mode 100644 test/flushrefreshTest.cmake diff --git a/MANIFEST b/MANIFEST index 79c2ef6..02c9928 100644 --- a/MANIFEST +++ b/MANIFEST @@ -85,6 +85,7 @@ ./bin/release ./bin/restore.sh ./bin/runtest _DO_NOT_DISTRIBUTE_ 
+./bin/runbkgprog _DO_NOT_DISTRIBUTE_ ./bin/snapshot ./bin/snapshot_version _DO_NOT_DISTRIBUTE_ ./bin/switch_maint_mode _DO_NOT_DISTRIBUTE_ @@ -3248,6 +3249,8 @@ ./test/CMakeLists.txt ./test/CMakeTests.cmake ./test/CMakeVFDTests.cmake +./test/flushrefreshTest.cmake +./test/ShellTests.cmake ./testpar/CMakeLists.txt ./testpar/CMakeTests.cmake ./tools/CMakeLists.txt diff --git a/bin/runbkgprog b/bin/runbkgprog new file mode 100755 index 0000000..69fa2d0 --- /dev/null +++ b/bin/runbkgprog @@ -0,0 +1,87 @@ +#!/usr/bin/perl -w +require 5.003; +$indent=4; + +# +# Copyright by The HDF Group. +# Copyright by the Board of Trustees of the University of Illinois. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. +# + +# Run program in background +# +use warnings; +use strict; + +use Carp; +use Time::HiRes; +use POSIX 'setsid'; + +my $child_pid; +my $child_proc; +my $cmd = $ARGV[0]; +my $debug = 1; + +print "\nStart child process\n"; +start_child(); +print "\nStarted child process\n"; + +sub start_child { + die "cannot execute cmd: $cmd" unless -x $cmd; + if ($^O eq 'MSWin32') { # Windows + require Win32::Process; + Win32::Process::Create($child_proc, $cmd, $cmd, 0, 0, ".") || confess "Could not spawn child: $!"; + $child_pid = $child_proc->GetProcessID(); + } + else { # Unix + $SIG{CHLD} = 'IGNORE'; + $child_pid = fork(); + unless (defined $child_pid) { + confess "Could not spawn child (Unix): $!"; + } + if ($child_pid == 0) { # child + unless ($debug) { + open STDIN, "<", "/dev/null" or die "Can't read /dev/null: $!"; + open STDOUT, ">", "/dev/null" or die "Can't write /dev/null: $!"; + } + setsid or warn "setsid cannot start a new session: $!"; + unless ($debug) { + open STDERR, '>&STDOUT' or die "Can't dup stdout: $!"; + } + local $| = 1; + unless (exec($cmd)) { + confess "Could not start child: $cmd: $!"; + CORE::exit(0); + } + } + # parent + $SIG{CHLD} = 'DEFAULT'; + } + # catch early child exit, e.g. if program path is incorrect + sleep(1.0); + POSIX::waitpid(-1, POSIX::WNOHANG()); # clean up any defunct child process + if (kill(0,$child_pid)) { + print "\nStarted child process id $child_pid\n"; + } + else { + warn "Child process exited quickly: $cmd: process $child_pid"; + } +} + +sub stop_child +{ + if ($^O eq 'MSWin32') { # Windows + Win32::Process::KillProcess($child_pid,0); + } + else { # Unix + kill 9, $child_pid || warn "could not kill process $child_pid: $!"; + } + print "Stopped child process id $child_pid\n"; +} diff --git a/config/cmake/UseJava.cmake b/config/cmake/UseJava.cmake index 10ffe47..754d742 100644 --- a/config/cmake/UseJava.cmake +++ b/config/cmake/UseJava.cmake @@ -383,7 +383,7 @@ # Create C header files from java classes. These files provide the connective glue # that allow your Java and C code to interact. # -# This command will no longer be supported starting with version 1.10 of the JDK due +# This command will no longer be supported starting with version 10 of the JDK due # to the `suppression of javah tool `_. # Command ``add_jar(GENERATE_NATIVE_HEADERS)`` must be used instead. 
# @@ -447,7 +447,7 @@ endfunction () function(__java_lcat VAR) foreach(_line ${ARGN}) - set(${VAR} "${${VAR}}${_line}\n") + string(APPEND ${VAR} "${_line}\n") endforeach() set(${VAR} "${${VAR}}" PARENT_SCOPE) @@ -528,7 +528,7 @@ function(add_jar _TARGET_NAME) if (_add_jar_GENERATE_NATIVE_HEADERS) # Raise an error if JDK version is less than 1.8 because javac -h is not supported # by earlier versions. - if ("${Java_VERSION}" VERSION_LESS 1.8) + if (Java_VERSION VERSION_LESS 1.8) message (FATAL_ERROR "ADD_JAR: GENERATE_NATIVE_HEADERS is not supported with this version of Java.") endif() cmake_parse_arguments (_add_jar_GENERATE_NATIVE_HEADERS "" "DESTINATION" "" ${_add_jar_GENERATE_NATIVE_HEADERS}) @@ -569,7 +569,7 @@ function(add_jar _TARGET_NAME) endif() foreach (JAVA_INCLUDE_DIR ${CMAKE_JAVA_INCLUDE_PATH}) - set(CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_PATH_FINAL}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${JAVA_INCLUDE_DIR}") + string(APPEND CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_FLAG_SEP}${JAVA_INCLUDE_DIR}") endforeach() set(CMAKE_JAVA_CLASS_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${_TARGET_NAME}.dir") @@ -641,7 +641,7 @@ function(add_jar _TARGET_NAME) if (TARGET ${_JAVA_INCLUDE_JAR}) get_target_property(_JAVA_JAR_PATH ${_JAVA_INCLUDE_JAR} JAR_FILE) if (_JAVA_JAR_PATH) - set(CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_PATH_FINAL}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${_JAVA_JAR_PATH}") + string(APPEND CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_FLAG_SEP}${_JAVA_JAR_PATH}") list(APPEND CMAKE_JAVA_INCLUDE_PATH ${_JAVA_JAR_PATH}) list(APPEND _JAVA_DEPENDS ${_JAVA_INCLUDE_JAR}) list(APPEND _JAVA_COMPILE_DEPENDS ${_JAVA_INCLUDE_JAR}) @@ -649,7 +649,7 @@ function(add_jar _TARGET_NAME) message(SEND_ERROR "add_jar: INCLUDE_JARS target ${_JAVA_INCLUDE_JAR} is not a jar") endif () else () - set(CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_PATH_FINAL}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${_JAVA_INCLUDE_JAR}") + string(APPEND CMAKE_JAVA_INCLUDE_PATH_FINAL "${CMAKE_JAVA_INCLUDE_FLAG_SEP}${_JAVA_INCLUDE_JAR}") list(APPEND CMAKE_JAVA_INCLUDE_PATH "${_JAVA_INCLUDE_JAR}") list(APPEND _JAVA_DEPENDS "${_JAVA_INCLUDE_JAR}") list(APPEND _JAVA_COMPILE_DEPENDS "${_JAVA_INCLUDE_JAR}") @@ -794,11 +794,9 @@ function(add_jar _TARGET_NAME) if (_GENERATE_NATIVE_HEADERS) # create an INTERFACE library encapsulating include directory for generated headers add_library (${_GENERATE_NATIVE_HEADERS_TARGET} INTERFACE) - target_include_directories (${_GENERATE_NATIVE_HEADERS_TARGET} - INTERFACE - "${_GENERATE_NATIVE_HEADERS_OUTPUT_DIR}" - ${JNI_INCLUDE_DIRS} - ) + target_include_directories (${_GENERATE_NATIVE_HEADERS_TARGET} INTERFACE + "${_GENERATE_NATIVE_HEADERS_OUTPUT_DIR}" + ${JNI_INCLUDE_DIRS}) # this INTERFACE library depends on jar generation add_dependencies (${_GENERATE_NATIVE_HEADERS_TARGET} ${_TARGET_NAME}) @@ -1375,10 +1373,10 @@ function(create_javadoc _target) endfunction() function (create_javah) - if ("${Java_VERSION}" VERSION_GREATER_EQUAL 1.10) + if (Java_VERSION VERSION_GREATER_EQUAL 10) message (FATAL_ERROR "create_javah: not supported with this Java version. Use add_jar(GENERATE_NATIVE_HEADERS) instead.") - elseif ("${Java_VERSION}" VERSION_GREATER_EQUAL 1.8) - message (DEPRECATION "create_javah: this command will no longer be supported starting with version 1.10 of JDK. 
Update your project by using command add_jar(GENERATE_NATIVE_HEADERS) instead.") + elseif (Java_VERSION VERSION_GREATER_EQUAL 1.8) + message (DEPRECATION "create_javah: this command will no longer be supported starting with version 10 of JDK. Update your project by using command add_jar(GENERATE_NATIVE_HEADERS) instead.") endif() cmake_parse_arguments(_create_javah diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake index 39d9d0d..34e21f7 100644 --- a/test/CMakeTests.cmake +++ b/test/CMakeTests.cmake @@ -228,29 +228,30 @@ add_test (NAME H5TEST-clear-testhdf5-objects WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) +set_tests_properties (H5TEST-clear-testhdf5-objects PROPERTIES FIXTURES_SETUP clear_testhdf5) if (HDF5_ENABLE_USING_MEMCHECKER) add_test (NAME H5TEST-testhdf5-base COMMAND $ -x heap -x file -x select) set_tests_properties (H5TEST-testhdf5-base PROPERTIES - DEPENDS H5TEST-clear-testhdf5-objects + FIXTURES_REQUIRED clear_testhdf5 ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) add_test (NAME H5TEST-testhdf5-heap COMMAND $ -o heap) set_tests_properties (H5TEST-testhdf5-heap PROPERTIES - DEPENDS H5TEST-clear-testhdf5-objects + FIXTURES_REQUIRED clear_testhdf5 ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) add_test (NAME H5TEST-testhdf5-file COMMAND $ -o file) set_tests_properties (H5TEST-testhdf5-file PROPERTIES - DEPENDS H5TEST-clear-testhdf5-objects + FIXTURES_REQUIRED clear_testhdf5 ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) add_test (NAME H5TEST-testhdf5-select COMMAND $ -o select) set_tests_properties (H5TEST-testhdf5-select PROPERTIES - DEPENDS H5TEST-clear-testhdf5-objects + FIXTURES_REQUIRED clear_testhdf5 ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -266,7 +267,7 @@ else () -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-testhdf5 PROPERTIES - DEPENDS H5TEST-clear-testhdf5-objects + FIXTURES_REQUIRED clear_testhdf5 ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -295,6 +296,7 @@ else () WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) + set_tests_properties (H5TEST-shared-clear-testhdf5-objects PROPERTIES FIXTURES_SETUP shared_clear_testhdf5) add_test (NAME H5TEST-shared-testhdf5 COMMAND "${CMAKE_COMMAND}" -D "TEST_PROGRAM=$" -D "TEST_ARGS:STRING=" @@ -306,7 +308,7 @@ else () -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-shared-testhdf5 PROPERTIES - DEPENDS H5TEST-shared-clear-testhdf5-objects + FIXTURES_REQUIRED shared_clear_testhdf5 ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) @@ -543,6 +545,7 @@ add_test (NAME H5TEST-clear-objects WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) +set_tests_properties (H5TEST-clear-objects PROPERTIES FIXTURES_SETUP clear_objects) set (H5TEST_SEPARATE_TESTS testhdf5 @@ -554,7 +557,7 @@ foreach (test ${H5_TESTS}) if (HDF5_ENABLE_USING_MEMCHECKER) add_test (NAME H5TEST-${test} COMMAND $) set_tests_properties (H5TEST-${test} PROPERTIES - DEPENDS H5TEST-clear-objects + FIXTURES_REQUIRED clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -576,7 +579,7 @@ 
foreach (test ${H5_TESTS})
       )
     endif ()
     set_tests_properties (H5TEST-${test} PROPERTIES
-        DEPENDS H5TEST-clear-objects
+        FIXTURES_REQUIRED clear_objects
         ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
         WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
     )
@@ -599,6 +602,7 @@ if (BUILD_SHARED_LIBS)
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
   )
 
+  set_tests_properties (H5TEST-shared-clear-objects PROPERTIES FIXTURES_SETUP shared_clear_objects)
   foreach (test ${H5_TESTS})
     if (NOT ${test} IN_LIST H5TEST_SEPARATE_TESTS)
@@ -619,7 +623,7 @@ if (BUILD_SHARED_LIBS)
         )
       endif ()
       set_tests_properties (H5TEST-shared-${test} PROPERTIES
-          DEPENDS H5TEST-shared-clear-objects
+          FIXTURES_REQUIRED shared_clear_objects
           ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
           WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
       )
@@ -642,6 +646,7 @@ if (NOT CYGWIN)
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
   )
 
+  set_tests_properties (H5TEST-clear-cache-objects PROPERTIES FIXTURES_SETUP cache_clear_objects)
   if (HDF5_ENABLE_USING_MEMCHECKER)
     add_test (NAME H5TEST-cache COMMAND $<TARGET_FILE:cache>)
   else ()
@@ -657,7 +662,7 @@ if (NOT CYGWIN)
     )
   endif ()
   set_tests_properties (H5TEST-cache PROPERTIES
-      DEPENDS H5TEST-clear-cache-objects
+      FIXTURES_REQUIRED cache_clear_objects
       ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5TestExpress=${HDF_TEST_EXPRESS}"
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
   )
@@ -673,9 +678,10 @@ add_test (
     WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
 )
 
+set_tests_properties (H5TEST-clear-cache_image-objects PROPERTIES FIXTURES_SETUP cache_image_clear_objects)
 add_test (NAME H5TEST-cache_image COMMAND $<TARGET_FILE:cache_image>)
 set_tests_properties (H5TEST-cache_image PROPERTIES
-    DEPENDS H5TEST-clear-cache_image-objects
+    FIXTURES_REQUIRED cache_image_clear_objects
     ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5TestExpress=${HDF_TEST_EXPRESS}"
     WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
 )
@@ -690,6 +696,7 @@ if (BUILD_SHARED_LIBS)
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
   )
 
+  set_tests_properties (H5TEST-shared-clear-cache-objects PROPERTIES FIXTURES_SETUP shared_cache_clear_objects)
   if (HDF5_ENABLE_USING_MEMCHECKER)
     add_test (NAME H5TEST-shared-cache COMMAND $<TARGET_FILE:cache-shared>)
   else ()
@@ -705,7 +712,7 @@ if (BUILD_SHARED_LIBS)
     )
   endif ()
   set_tests_properties (H5TEST-shared-cache PROPERTIES
-      DEPENDS H5TEST-shared-clear-cache-objects
+      FIXTURES_REQUIRED shared_cache_clear_objects
       ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared;HDF5TestExpress=${HDF_TEST_EXPRESS}"
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
   )
@@ -756,9 +763,10 @@ add_test (
     WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
 )
 
+set_tests_properties (H5TEST-clear-filenotclosed-objects PROPERTIES FIXTURES_SETUP filenotclosed_clear_objects)
 add_test (NAME H5TEST-filenotclosed COMMAND $<TARGET_FILE:filenotclosed>)
 set_tests_properties (H5TEST-filenotclosed PROPERTIES
-    DEPENDS H5TEST-clear-filenotclosed-objects
+    FIXTURES_REQUIRED filenotclosed_clear_objects
     ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
     WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
 )
@@ -773,6 +781,7 @@ if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
       WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
   )
 
+  set_tests_properties (H5TEST-clear-err_compat-objects PROPERTIES FIXTURES_SETUP err_compat_clear_objects)
   add_test (NAME H5TEST-err_compat COMMAND "${CMAKE_COMMAND}"
       -D "TEST_PROGRAM=$<TARGET_FILE:err_compat>"
       -D "TEST_ARGS:STRING="
@@ -784,7 +793,7 @@ if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
       -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
   )
   set_tests_properties (H5TEST-err_compat PROPERTIES
-      DEPENDS
H5TEST-clear-err_compat-objects + FIXTURES_REQUIRED err_compat_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -799,6 +808,7 @@ add_test (NAME H5TEST-clear-error_test-objects WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) +set_tests_properties (H5TEST-clear-error_test-objects PROPERTIES FIXTURES_SETUP error_test_clear_objects) if (HDF5_USE_16_API_DEFAULT) add_test ( NAME H5TEST-error_test-SKIPPED @@ -816,7 +826,7 @@ else () -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-error_test PROPERTIES - DEPENDS H5TEST-clear-error_test-objects + FIXTURES_REQUIRED error_test_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5_PLUGIN_PRELOAD=::" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -834,6 +844,7 @@ add_test (NAME H5TEST-clear-links_env-objects WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) +set_tests_properties (H5TEST-clear-links_env-objects PROPERTIES FIXTURES_SETUP links_env_clear_objects) add_test (NAME H5TEST-links_env COMMAND "${CMAKE_COMMAND}" -D "TEST_PROGRAM=$" -D "TEST_ARGS:STRING=" @@ -846,7 +857,7 @@ add_test (NAME H5TEST-links_env COMMAND "${CMAKE_COMMAND}" -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-links_env PROPERTIES - DEPENDS H5TEST-clear-links_env-objects + FIXTURES_REQUIRED links_env_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST ) @@ -869,6 +880,7 @@ if (BUILD_SHARED_LIBS) WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) + set_tests_properties (H5TEST-shared-clear-err_compat-objects PROPERTIES FIXTURES_SETUP shared_err_compat_clear_objects) add_test (NAME H5TEST-shared-err_compat COMMAND "${CMAKE_COMMAND}" -D "TEST_PROGRAM=$" -D "TEST_ARGS:STRING=" @@ -880,7 +892,7 @@ if (BUILD_SHARED_LIBS) -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-shared-err_compat PROPERTIES - DEPENDS H5TEST-shared-clear-err_compat-objects + FIXTURES_REQUIRED shared_err_compat_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) @@ -895,6 +907,7 @@ if (BUILD_SHARED_LIBS) WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) + set_tests_properties (H5TEST-shared-clear-error_test-objects PROPERTIES FIXTURES_SETUP shared_error_test_clear_objects) if (HDF5_USE_16_API_DEFAULT) add_test ( NAME H5TEST-shared-error_test-SKIPPED @@ -912,7 +925,7 @@ if (BUILD_SHARED_LIBS) -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-shared-error_test PROPERTIES - DEPENDS H5TEST-shared-clear-error_test-objects + FIXTURES_REQUIRED shared_error_test_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared;HDF5_PLUGIN_PRELOAD=::" WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) @@ -930,6 +943,7 @@ if (BUILD_SHARED_LIBS) WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) + set_tests_properties (H5TEST-shared-clear-links_env-objects PROPERTIES FIXTURES_SETUP shared_links_env_clear_objects) add_test (NAME H5TEST-shared-links_env COMMAND "${CMAKE_COMMAND}" -D "TEST_PROGRAM=$" -D "TEST_ARGS:STRING=" @@ -942,7 +956,7 @@ if (BUILD_SHARED_LIBS) -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" ) set_tests_properties (H5TEST-shared-links_env PROPERTIES - DEPENDS H5TEST-shared-clear-links_env-objects + FIXTURES_REQUIRED shared_links_env_clear_objects ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared" WORKING_DIRECTORY 
${HDF5_TEST_BINARY_DIR}/H5TEST-shared ) @@ -982,13 +996,60 @@ set_tests_properties (H5PLUGIN-filter_plugin PROPERTIES #) ############################################################################## -############################################################################## -### S W M R T E S T S -############################################################################## -# testflushrefresh.sh: flushrefresh -# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes -# testswmr.sh: swmr* -# testvdsswmr.sh: vds_swmr* +if (TEST_SHELL_SCRIPTS) + include (ShellTests.cmake) +endif() + +if (ENABLE_EXTENDED_TESTS) + ############################################################################## + ### S W M R T E S T S + ############################################################################## + # testflushrefresh.sh: flushrefresh + # test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes + # testswmr.sh: swmr* + # testvdsswmr.sh: vds_swmr* + +# add_test (NAME H5Test-swmr_check_compat_vfd COMMAND $) + +#-- Adding test for flushrefresh + file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/flushrefresh_test") + if (BUILD_SHARED_LIBS) + file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST-shared/flushrefresh_test") + endif () + find_package (Perl) + if (PERL_FOUND) + add_test (NAME H5TEST-clear-testflushrefresh-objects + COMMAND ${CMAKE_COMMAND} + -E remove + flushrefresh.txt + flushrefresh.txt.err + flushrefresh.h5 + WORKING_DIRECTORY + ${HDF5_TEST_BINARY_DIR}/H5TEST/flushrefresh_test + ) + set_tests_properties (H5TEST-clear-testflushrefresh-objects PROPERTIES FIXTURES_SETUP testflushrefresh_clear_objects) + add_test (NAME H5TEST-testflushrefresh COMMAND "${CMAKE_COMMAND}" + -D "TEST_PROGRAM=$" + -D "TEST_ARGS1:STRING=flushrefresh_VERIFICATION_START" + -D "TEST_ARGS2:STRING=flushrefresh_VERIFICATION_DONE" + -D "TEST_ERR:STRING=flushrefresh_ERROR" + -D "TEST_EXPECT=0" + -D "TEST_OUTPUT=flushrefresh.out" + -D "TEST_REFERENCE=flushrefresh.txt" + -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/H5TEST/flushrefresh_test" + -D "PERL_SCRIPT=${HDF5_SOURCE_DIR}/bin/runbkgprog" + -D "PERL_EXECUTABLE=${PERL_EXECUTABLE}" + -P "${HDF5_TEST_SOURCE_DIR}/flushrefreshTest.cmake" + ) + set_tests_properties (H5TEST-testflushrefresh PROPERTIES + FIXTURES_REQUIRED testflushrefresh_clear_objects + ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST/flushrefresh_test" + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST/flushrefresh_test + ) + else () + message (STATUS "Cannot execute TEST flushrefresh - perl not found") + endif () +endif() ############################################################################## ############################################################################## diff --git a/test/ShellTests.cmake b/test/ShellTests.cmake new file mode 100644 index 0000000..fe997b2 --- /dev/null +++ b/test/ShellTests.cmake @@ -0,0 +1,244 @@ + +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. 
+# + +############################################################################## +############################################################################## +### T E S T I N G S H E L L S C R I P T S ### +############################################################################## + +if (UNIX) + + find_program (SH_PROGRAM sh) + if (SH_PROGRAM) + + ############################################################################## + # configure scripts to test dir + ############################################################################## + find_package (Perl) + if (PERL_FOUND) + configure_file(${HDF5_TEST_SOURCE_DIR}/testflushrefresh.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testflushrefresh.sh @ONLY) + endif () + configure_file(${HDF5_TEST_SOURCE_DIR}/test_usecases.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_usecases.sh @ONLY) + configure_file(${HDF5_TEST_SOURCE_DIR}/testcheck_version.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testcheck_version.sh @ONLY) + configure_file(${HDF5_TEST_SOURCE_DIR}/testswmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.sh @ONLY) + configure_file(${HDF5_TEST_SOURCE_DIR}/testvdsswmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.sh @ONLY) + + ############################################################################## + # copy test programs to test dir + ############################################################################## + add_custom_command ( + TARGET swmr_check_compat_vfd + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_check_compat_vfd" + ) + + add_custom_command ( + TARGET swmr_check_compat_vfd + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "${HDF5_SOURCE_DIR}/bin/output_filter.sh" "${HDF5_TEST_BINARY_DIR}/H5TEST/bin/output_filter.sh" + ) + + add_custom_command ( + TARGET tcheck_version + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "${HDF5_BINARY_DIR}/H5pubconf.h" "${HDF5_TEST_BINARY_DIR}/src/H5pubconf.h" + ) + + add_custom_command ( + TARGET tcheck_version + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "${HDF5_BINARY_DIR}/libhdf5.settings" "${HDF5_TEST_BINARY_DIR}/src/libhdf5.settings" + ) + + add_custom_command ( + TARGET tcheck_version + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "${HDF5_SOURCE_DIR}/src/H5public.h" "${HDF5_TEST_BINARY_DIR}/src/H5public.h" + ) + + file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/flushrefresh_test") + add_custom_command ( + TARGET flushrefresh + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/flushrefresh" + ) + + #shell script creates dir + #file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/usecases_test") + add_custom_command ( + TARGET use_append_mchunks + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/use_append_mchunks" + ) + add_custom_command ( + TARGET use_disable_mdc_flushes + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/use_disable_mdc_flushes" + ) + add_custom_command ( + TARGET twriteorder + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/twriteorder" + ) + add_custom_command ( + TARGET use_append_chunk + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/use_append_chunk" + ) + + file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/swmr_test") + add_custom_command ( + TARGET 
swmr_generator + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_generator" + ) + add_custom_command ( + TARGET swmr_start_write + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_start_write" + ) + add_custom_command ( + TARGET swmr_reader + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_reader" + ) + add_custom_command ( + TARGET swmr_writer + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_writer" + ) + add_custom_command ( + TARGET swmr_remove_reader + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_remove_reader" + ) + add_custom_command ( + TARGET swmr_remove_writer + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_remove_writer" + ) + add_custom_command ( + TARGET swmr_addrem_writer + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_addrem_writer" + ) + add_custom_command ( + TARGET swmr_sparse_reader + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_sparse_reader" + ) + add_custom_command ( + TARGET swmr_sparse_writer + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/swmr_test/swmr_sparse_writer" + ) + + file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/vds_swmr_test") + add_custom_command ( + TARGET vds_swmr_gen + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/vds_swmr_gen" + ) + add_custom_command ( + TARGET vds_swmr_writer + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/vds_swmr_writer" + ) + add_custom_command ( + TARGET vds_swmr_reader + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different "$" "${HDF5_TEST_BINARY_DIR}/H5TEST/vds_swmr_reader" + ) + + + + ############################################################################## + ############################################################################## + ### A D D I T I O N A L T E S T S ### + ############################################################################## + ############################################################################## + # H5_CHECK_TESTS + #--------------- + # tcheck_version + # atomic_writer + # atomic_reader + # filenotclosed + # flushrefresh + ############################################################################## + # autotools script tests + # error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh. + # NOT CONVERTED tcheck_version is used by testcheck_version.sh. + # NOT CONVERTED accum_swmr_reader is used by accum.c. + # NOT CONVERTED atomic_writer and atomic_reader are standalone programs. + # links_env is used by testlinks_env.sh + # filenotclosed is used by test_filenotclosed.sh + # NOT CONVERTED flushrefresh is used by testflushrefresh.sh. + # NOT CONVERTED use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh + # NOT CONVERTED swmr_* files (besides swmr.c) are used by testswmr.sh. 
+ # NOT CONVERTED vds_swmr_* files are used by testvdsswmr.sh + # NOT CONVERTED 'make check' doesn't run them directly, so they are not included in TEST_PROG. + # NOT CONVERTED Also build testmeta, which is used for timings test. It builds quickly, + # NOT CONVERTED and this lets automake keep all its test programs in one place. + ############################################################################## + + ############################################################################## + ### S W M R T E S T S + ############################################################################## + # testflushrefresh.sh: flushrefresh + # test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes + # testswmr.sh: swmr* + # testvdsswmr.sh: vds_swmr* + add_test (H5SHELL-testflushrefresh ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testflushrefresh.sh) + set_tests_properties (H5SHELL-testflushrefresh PROPERTIES + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST + ) + add_test (H5SHELL-test_usecases ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_usecases.sh) + set_tests_properties (H5SHELL-test_usecases PROPERTIES + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST + ) + add_test (H5SHELL-testswmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.sh) + set_tests_properties (H5SHELL-testswmr PROPERTIES + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST + ) + add_test (H5SHELL-testvdsswmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.sh) + set_tests_properties (H5SHELL-testvdsswmr PROPERTIES + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST + ) + add_test (H5SHELL-testcheck_version ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testcheck_version.sh) + set_tests_properties (H5SHELL-testcheck_version PROPERTIES + WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST + ) + + endif () +endif () diff --git a/test/flushrefreshTest.cmake b/test/flushrefreshTest.cmake new file mode 100644 index 0000000..f66ee64 --- /dev/null +++ b/test/flushrefreshTest.cmake @@ -0,0 +1,172 @@ +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. +# +# runTest.cmake executes a command and captures the output in a file. File is then compared +# against a reference file. Exit status of command can also be compared. 
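+#
+# How this driver works: the Perl helper (PERL_SCRIPT, i.e. bin/runbkgprog)
+# starts TEST_PROGRAM in the background, and the two polling loops below
+# coordinate with it through files in TEST_FOLDER. Each time TEST_ARGS1
+# appears, its first and last lines are read as parameters, TEST_PROGRAM is
+# re-run to perform that verification step, and TEST_ARGS2 is written as the
+# acknowledgement. A parameter matching VERIFICATION_DONE ends a stage, and
+# the appearance of TEST_ERR aborts the handshake.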
+cmake_policy(SET CMP0007 NEW)
+
+# arguments checking
+if (NOT TEST_PROGRAM)
+  message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
+endif ()
+if (NOT TEST_FOLDER)
+  message (FATAL_ERROR "Require TEST_FOLDER to be defined")
+endif ()
+if (NOT TEST_OUTPUT)
+  message (FATAL_ERROR "Require TEST_OUTPUT to be defined")
+endif ()
+if (NOT PERL_SCRIPT)
+  message (STATUS "Require PERL_SCRIPT to be defined")
+endif ()
+if (NOT PERL_EXECUTABLE)
+  message (STATUS "Require PERL_EXECUTABLE to be defined")
+endif ()
+
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+  file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
+endif ()
+
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+  file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+endif ()
+
+message (STATUS "COMMAND: ${TEST_PROGRAM} ${TEST_ARGS}")
+
+if (TEST_LIBRARY_DIRECTORY)
+  if (WIN32 AND NOT MINGW)
+    set (ENV{PATH} "$ENV{PATH};${TEST_LIBRARY_DIRECTORY}")
+  else ()
+    set (ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${TEST_LIBRARY_DIRECTORY}")
+  endif ()
+endif ()
+
+if (TEST_ENV_VAR)
+  set (ENV{${TEST_ENV_VAR}} "${TEST_ENV_VALUE}")
+  #message (STATUS "ENV:${TEST_ENV_VAR}=$ENV{${TEST_ENV_VAR}}")
+endif ()
+
+message(STATUS "Background: ${PERL_EXECUTABLE} ${PERL_SCRIPT} ${TEST_PROGRAM}")
+execute_process (
+    COMMAND ${PERL_EXECUTABLE} ${PERL_SCRIPT} ${TEST_PROGRAM}
+    RESULT_VARIABLE SCRIPT_RESULT
+    ERROR_VARIABLE SCRIPT_ERR
+    OUTPUT_VARIABLE SCRIPT_OUTPUT
+    WORKING_DIRECTORY ${TEST_FOLDER}
+)
+message(STATUS "Background: ${SCRIPT_OUTPUT}")
+if (NOT "${SCRIPT_RESULT}" STREQUAL "0")
+  message (FATAL_ERROR "Failed: The background script failed ${SCRIPT_RESULT}: ${SCRIPT_ERR}")
+endif ()
+
+set(verification_done "0")
+while(verification_done LESS "1")
+  message(STATUS "checking first stage:${TEST_FOLDER}/${TEST_ARGS1}")
+  if(EXISTS ${TEST_FOLDER}/${TEST_ERR})
+    # Error exit script
+    set(verification_done "3")
+  elseif(EXISTS ${TEST_FOLDER}/${TEST_ARGS1})
+    file (STRINGS ${TEST_FOLDER}/${TEST_ARGS1} v1)
+    list (LENGTH v1 len_v1)
+    message(STATUS "v1:${v1} len_v1:${len_v1}")
+    if (NOT "${len_v1}" STREQUAL "0")
+      list (GET v1 0 param1)
+      list (GET v1 -1 param2)
+    endif ()
+    file(REMOVE ${TEST_FOLDER}/${TEST_ARGS1})
+    message(STATUS "PARAM1:${param1} PARAM2:${param2}")
+
+    if(${param1} MATCHES "VERIFICATION_DONE")
+      set(verification_done "1")
+      file(WRITE ${TEST_FOLDER}/${TEST_ARGS2} "all flush verification complete")
+      message(STATUS "write: ${TEST_FOLDER}/${TEST_ARGS2}")
+    else()
+      message(STATUS "execute: ${TEST_PROGRAM} ${param1} ${param2}")
+      execute_process (
+          COMMAND ${TEST_PROGRAM} ${param1} ${param2}
+          RESULT_VARIABLE TEST_RESULT
+          OUTPUT_FILE ${TEST_OUTPUT}
+          ERROR_FILE ${TEST_OUTPUT}.err
+          OUTPUT_VARIABLE TEST_OUT
+          ERROR_VARIABLE TEST_ERROR
+          WORKING_DIRECTORY ${TEST_FOLDER}
+      )
+      message(STATUS "flush verification: ${TEST_OUT}")
+      if (NOT "${TEST_RESULT}" STREQUAL "0")
+        message (FATAL_ERROR "Failed: The flush verification failed ${TEST_RESULT}: ${TEST_ERROR}")
+      endif ()
+      file(WRITE ${TEST_FOLDER}/${TEST_ARGS2} "verification flush process done")
+    endif()
+  else()
+    message(STATUS "waiting: ${TEST_FOLDER}/${TEST_ARGS1}")
+    #execute_process (COMMAND ${CMAKE_COMMAND} -E sleep 2)
+  endif()
+endwhile()
+
+while(verification_done LESS "2")
+  message(STATUS "checking second stage:${TEST_FOLDER}/${TEST_ARGS1}")
+  if(EXISTS ${TEST_FOLDER}/${TEST_ERR})
+    # Error exit script
+    set(verification_done "3")
+  elseif(EXISTS ${TEST_FOLDER}/${TEST_ARGS1})
+    file (STRINGS ${TEST_FOLDER}/${TEST_ARGS1} v1)
+    list (LENGTH v1 len_v1)
+    message(STATUS "v1:${v1} len_v1:${len_v1}")
+    if (NOT "${len_v1}" STREQUAL "0")
+      list (GET v1 0 param1)
+      list (GET v1 -1 param2)
+    endif ()
+    file(REMOVE ${TEST_FOLDER}/${TEST_ARGS1})
+    message(STATUS "PARAM1:${param1} PARAM2:${param2}")
+
+    if(${param1} MATCHES "VERIFICATION_DONE")
+      set(verification_done "2")
+      file(WRITE ${TEST_FOLDER}/${TEST_ARGS2} "all refresh verification complete")
+      message(STATUS "write: ${TEST_FOLDER}/${TEST_ARGS2}")
+    else()
+      message(STATUS "execute: ${TEST_PROGRAM} ${param1}")
+      execute_process (
+          COMMAND ${TEST_PROGRAM} ${param1}
+          RESULT_VARIABLE TEST_RESULT
+          OUTPUT_FILE ${TEST_OUTPUT}
+          ERROR_FILE ${TEST_OUTPUT}.err
+          OUTPUT_VARIABLE TEST_OUT
+          ERROR_VARIABLE TEST_ERROR
+          WORKING_DIRECTORY ${TEST_FOLDER}
+      )
+      message(STATUS "refresh verification: ${TEST_OUT}")
+      if (NOT "${TEST_RESULT}" STREQUAL "0")
+        message (FATAL_ERROR "Failed: The refresh verification failed ${TEST_RESULT}: ${TEST_ERROR}")
+      endif ()
+      file(WRITE ${TEST_FOLDER}/${TEST_ARGS2} "refresh verification process done")
+    endif()
+  else()
+    message(STATUS "waiting: ${TEST_FOLDER}/${TEST_ARGS1}")
+    #execute_process (COMMAND ${CMAKE_COMMAND} -E sleep 2)
+  endif()
+endwhile()
+
+message (STATUS "COMMAND Result: ${TEST_RESULT}")
+
+# If the return value is != ${TEST_EXPECT}, bail out
+if (NOT "${TEST_RESULT}" STREQUAL "${TEST_EXPECT}")
+  if (NOT TEST_NOERRDISPLAY)
+    if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+      file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
+      message (STATUS "Output :\n${TEST_STREAM}")
+    endif ()
+  endif ()
+  message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
+endif ()
+
+message (STATUS "COMMAND Error: ${TEST_ERROR}")
+
+# everything went fine...
+message ("Passed")
--
cgit v0.12


From 7117a66fd02d4350cf545262ff95a173d95aa10d Mon Sep 17 00:00:00 2001
From: Allen Byrne
Date: Mon, 16 Jul 2018 13:07:50 -0500
Subject: TRILAB-31 add note

---
 release_docs/RELEASE.txt | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index a322164..34d90ff 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -46,7 +46,13 @@ New Features

     Configuration:
     -------------
-    -
+    - Add option to execute swmr shell scripts from CMake.
+
+      Option TEST_SHELL_SCRIPTS redirects processing into a
+      separate ShellTests.cmake file for UNIX types. The tests
+      execute the shell scripts if a SH program is found.
+ + (ADB - 2018/07/16) Library: -------- -- cgit v0.12 From d5e7134df911808bba26ac6629e1fe8324916585 Mon Sep 17 00:00:00 2001 From: Jerome Soumagne Date: Mon, 16 Jul 2018 13:33:23 -0500 Subject: Fix evaluation of __has_attribute(no_sanitize_address) on older GCC versions --- src/H5detect.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/H5detect.c b/src/H5detect.c index 60ef656..2d33a3d 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -54,10 +54,14 @@ static const char *FileHeader = "\n\ #include "H5Tpublic.h" #include "H5Rpublic.h" -#if defined(__has_attribute) && __has_attribute(no_sanitize_address) -#define HDF_NO_UBSAN __attribute__((no_sanitize_address)) +#if defined(__has_attribute) +# if __has_attribute(no_sanitize_address) +# define HDF_NO_UBSAN __attribute__((no_sanitize_address)) +# else +# define HDF_NO_UBSAN +# endif #else -#define HDF_NO_UBSAN +# define HDF_NO_UBSAN #endif #define MAXDETECT 64 -- cgit v0.12 From 83ca39ba9a2d6852dd7754b533f7839e9d2a0107 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Mon, 16 Jul 2018 14:48:11 -0500 Subject: Combined macro lines as Dana commented Platforms tested: Linux/64 (jelly) (very minor) --- src/H5Dbtree.c | 3 +-- src/H5Dchunk.c | 3 +-- src/H5Dint.c | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index 04f5419..8abfe27 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -688,8 +688,7 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key for(u = 0; u < layout->ndims; u++) { if (layout->dim[u] == 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, - "chunk size must be > 0, dim = %u ", u) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) /* Retrieve coordinate offset */ UINT64DECODE(raw, tmp_offset); diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7b8377d..f03ee8b 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -948,8 +948,7 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id) /* Initial scaled dimension sizes */ if(dset->shared->layout.u.chunk.dim[u] == 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, - "chunk size must be > 0, dim = %u ", u) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u]; if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) ) diff --git a/src/H5Dint.c b/src/H5Dint.c index 0c7fee0..e8874a2 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -2634,8 +2634,7 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) /* Compute the scaled dimension size value */ if(dset->shared->layout.u.chunk.dim[dim_idx] == 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, - "chunk size must be > 0, dim = %u ", dim_idx) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", dim_idx) scaled = size[dim_idx] / dset->shared->layout.u.chunk.dim[dim_idx]; -- cgit v0.12 From 073e0b1f0312ac93927e511e2c48d89728e91987 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Mon, 16 Jul 2018 20:28:27 -0500 Subject: Fixed HDFFV-10476, HDFFV-10478, HDFFV-10480 Description: Fixed potential out of bound read and NULL pointer dereferences. 
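In each case the decoding code now validates what it is about to read or
dereference before using it; for example, the guard added in H5Ofill.c
(shown verbatim in the diff below) rejects a fill-value size that exceeds
the bytes remaining in the message buffer:

    if((size_t)fill->size > p_size)
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "destination buffer too small")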
Platforms tested: Linux/64 (jelly) Linux/32 (jam) Darwin (osx1010test) --- src/H5Ocache.c | 2 ++ src/H5Ofill.c | 8 ++++++-- src/H5Shyper.c | 5 +++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/H5Ocache.c b/src/H5Ocache.c index d65942b..59e1705 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -1553,6 +1553,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image /* Set object header values */ oh->has_refcount_msg = TRUE; + if(!refcount) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't decode refcount") oh->nlink = *refcount; } /* end if */ /* Check if message is a link message */ diff --git a/src/H5Ofill.c b/src/H5Ofill.c index da9829b..8a6004d 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -194,7 +194,7 @@ H5FL_BLK_EXTERN(type_conv); static void * H5O_fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + size_t p_size, const uint8_t *p) { H5O_fill_t *fill = NULL; void *ret_value = NULL; /* Return value */ @@ -228,6 +228,8 @@ H5O_fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, INT32DECODE(p, fill->size); if(fill->size > 0) { H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); + if((size_t)fill->size > p_size) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "destination buffer too small") if(NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") HDmemcpy(fill->buf, p, (size_t)fill->size); @@ -309,7 +311,7 @@ done: static void * H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + size_t p_size, const uint8_t *p) { H5O_fill_t *fill = NULL; /* Decoded fill value message */ htri_t exists = FALSE; @@ -335,6 +337,8 @@ H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh, /* Only decode the fill value itself if there is one */ if(fill->size > 0) { H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); + if((size_t)fill->size > p_size) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "destination buffer too small") /* Get the datatype message */ if((exists = H5O_msg_exists_oh(open_oh, H5O_DTYPE_ID)) < 0) diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 5653209..e1587b9 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -5903,7 +5903,7 @@ H5S__hyper_make_spans(unsigned rank, const hsize_t *start, const hsize_t *stride last_span = NULL; /* Generate all the span segments for this dimension */ - for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) { + for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) { H5S_hyper_span_t *span; /* New hyperslab span */ /* Allocate a span node */ @@ -5945,7 +5945,8 @@ H5S__hyper_make_spans(unsigned rank, const hsize_t *start, const hsize_t *stride } /* end for */ /* Indicate that there is a pointer to this tree */ - down->count = 1; + if(down) + down->count = 1; /* Success! 
Return the head of the list in the slowest changing dimension */ ret_value = down; -- cgit v0.12 From fe916ada370f33b48b3c39dbf9e3ff73df00fdb7 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Mon, 16 Jul 2018 22:10:50 -0500 Subject: Removed white space --- src/H5Shyper.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/H5Shyper.c b/src/H5Shyper.c index e1587b9..7a6d1be 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -5903,7 +5903,8 @@ H5S__hyper_make_spans(unsigned rank, const hsize_t *start, const hsize_t *stride last_span = NULL; /* Generate all the span segments for this dimension */ - for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) { + for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) + { H5S_hyper_span_t *span; /* New hyperslab span */ /* Allocate a span node */ -- cgit v0.12 From 14bf28780aa850928c0ffeae838f6dd6140bd615 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Tue, 17 Jul 2018 01:09:45 -0500 Subject: Fixed HDFFV-10458 partially Description: Added wrappers for H5Oget_info2 and H5Oget_info_by_name2. // Returns information about an HDF5 object. void getInfo(H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC) // Returns information about an HDF5 object, given its name. void getInfo(const char* name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) void getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) Platforms tested: Linux/64 (jelly) Linux/32 (jam) Darwin (osx1010test) --- c++/src/H5Object.cpp | 54 +++++++++++++++++++++++++++++ c++/src/H5Object.h | 5 +++ c++/test/tattr.cpp | 19 +++++++++- c++/test/tfile.cpp | 14 +++++++- c++/test/tlinks.cpp | 7 ++-- c++/test/tobject.cpp | 98 +++++++++++++++++++++++++++++++++++++++++++++++++--- 6 files changed, 187 insertions(+), 10 deletions(-) diff --git a/c++/src/H5Object.cpp b/c++/src/H5Object.cpp index b95e222..940a7c6 100644 --- a/c++/src/H5Object.cpp +++ b/c++/src/H5Object.cpp @@ -227,6 +227,60 @@ int H5Object::iterateAttrs(attr_operator_t user_op, unsigned *_idx, void *op_dat } //-------------------------------------------------------------------------- +// Function: H5Object::getInfo +///\brief Returns information about an HDF5 object. +///\return Struct containing the object info +///\exception +// July, 2018 +//-------------------------------------------------------------------------- +void H5Object::getInfo(H5O_info_t& objinfo, unsigned fields) const +{ + + // Use C API to get information of the object + herr_t ret_value = H5Oget_info2(getId(), &objinfo, fields); + + // Throw exception if C API returns failure + if (ret_value < 0) + throwException(inMemFunc("getObjinfo"), "H5Oget_info2 failed"); +} + +//-------------------------------------------------------------------------- +// Function: H5Object::getInfo +///\brief Returns information about an HDF5 object given its name. 
+///\return Struct containing the object info +///\exception +// July, 2018 +//-------------------------------------------------------------------------- +void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const +{ + // Use C API to get information of the object + herr_t ret_value = H5Oget_info_by_name2(getId(), name, &objinfo, fields, lapl.getId()); + + // Throw exception if C API returns failure + if (ret_value < 0) + throwException(inMemFunc("getObjinfo"), "H5Oget_info_by_name2 failed"); +} + +//-------------------------------------------------------------------------- +// Function: H5Object::getInfo +///\brief This is an overloaded member function, provided for convenience. +/// It differs from the above function in that it takes +/// a reference to an \c H5std_string for \a name. +///\return Struct containing the object info +///\exception +// July, 2018 +//-------------------------------------------------------------------------- +void H5Object::getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const +{ + // Use C API to get information of the object + herr_t ret_value = H5Oget_info_by_name2(getId(), name.c_str(), &objinfo, fields, lapl.getId()); + + // Throw exception if C API returns failure + if (ret_value < 0) + throwException(inMemFunc("getObjinfo"), "H5Oget_info_by_name2 failed"); +} + +//-------------------------------------------------------------------------- // Function: H5Object::objVersion ///\brief Returns the header version of this HDF5 object. ///\return Object version, which can have the following values: diff --git a/c++/src/H5Object.h b/c++/src/H5Object.h index 10b3865..606b0ce 100644 --- a/c++/src/H5Object.h +++ b/c++/src/H5Object.h @@ -92,6 +92,11 @@ class H5_DLLCPP H5Object : public H5Location { // Returns an identifier. virtual hid_t getId() const = 0; + // Returns information about an HDF5 object. + void getInfo(H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC) const; + void getInfo(const char* name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; + void getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; + // Gets the name of this HDF5 object, i.e., Group, DataSet, or // DataType. 
ssize_t getObjName(char *obj_name, size_t buf_size = 0) const; diff --git a/c++/test/tattr.cpp b/c++/test/tattr.cpp index 5aa4bf5..94c811a 100644 --- a/c++/test/tattr.cpp +++ b/c++/test/tattr.cpp @@ -518,6 +518,12 @@ static void test_attr_basic_read() int num_attrs = dataset.getNumAttrs(); verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__); + // Verify the correct number of attributes another way + H5O_info_t oinfo; + HDmemset(&oinfo, 0, sizeof(oinfo)); + dataset.getInfo(oinfo, H5O_INFO_NUM_ATTRS); + verify_val(oinfo.num_attrs, 3, "DataSet::getInfo", __LINE__, __FILE__); + // Open an attribute for the dataset Attribute ds_attr=dataset.openAttribute(ATTR1_NAME); @@ -538,7 +544,12 @@ static void test_attr_basic_read() // Verify the correct number of attributes num_attrs = group.getNumAttrs(); - verify_val(num_attrs, 1, "H5Group::getNumAttrs", __LINE__, __FILE__); + verify_val(num_attrs, 1, "Group::getNumAttrs", __LINE__, __FILE__); + + // Verify the correct number of attributes another way + HDmemset(&oinfo, 0, sizeof(oinfo)); + group.getInfo(oinfo, H5O_INFO_NUM_ATTRS); + verify_val(oinfo.num_attrs, 1, "Group::getInfo", __LINE__, __FILE__); // Open an attribute for the group Attribute gr_attr = group.openAttribute(ATTR2_NAME); @@ -658,6 +669,12 @@ static void test_attr_compound_read() int num_attrs = dataset.getNumAttrs(); verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__); + // Verify the correct number of attributes another way + H5O_info_t oinfo; + HDmemset(&oinfo, 0, sizeof(oinfo)); + dataset.getInfo(oinfo, H5O_INFO_NUM_ATTRS); + verify_val(oinfo.num_attrs, 1, "DataSet::getInfo", __LINE__, __FILE__); + // Open 1st attribute for the dataset Attribute attr = dataset.openAttribute((unsigned)0); diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index c82ab42..dd32364 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -657,8 +657,15 @@ static void test_libver_bounds_real( unsigned obj_version = file.childObjVersion(ROOTGROUP); verify_val(obj_version, oh_vers_create, "H5File::childObjVersion", __LINE__, __FILE__); + // Verify object header version another way + H5O_info_t oinfo; + HDmemset(&oinfo, 0, sizeof(oinfo)); + file.getInfo(oinfo, H5O_INFO_HDR); + verify_val(oinfo.hdr.version, oh_vers_create, "H5File::getInfo", __LINE__, __FILE__); + /* - * Reopen the file and make sure the root group still has the correct version + * Reopen the file and make sure the root group still has the correct + * version */ file.close(); @@ -678,6 +685,11 @@ static void test_libver_bounds_real( obj_version = group.objVersion(); verify_val(obj_version, oh_vers_mod, "Group::objVersion", __LINE__, __FILE__); + // Verify object header version another way + HDmemset(&oinfo, 0, sizeof(oinfo)); + group.getInfo(oinfo, H5O_INFO_HDR); + verify_val(oinfo.hdr.version, oh_vers_mod, "Group::getInfo", __LINE__, __FILE__); + group.close(); // close "/G1" /* diff --git a/c++/test/tlinks.cpp b/c++/test/tlinks.cpp index b8560aa..f8d7089 100644 --- a/c++/test/tlinks.cpp +++ b/c++/test/tlinks.cpp @@ -515,7 +515,7 @@ test_lcpl(hid_t fapl_id, hbool_t new_format) } // end of try block catch (Exception& E) { - issue_fail_msg("test_num_links()", __LINE__, __FILE__, E.getCDetailMsg()); + issue_fail_msg("test_lcpl()", __LINE__, __FILE__, E.getCDetailMsg()); } } // end test_lcpl() @@ -657,7 +657,7 @@ test_move(hid_t fapl_id, hbool_t new_format) } // end of try block catch (Exception& E) { - issue_fail_msg("test_num_links()", __LINE__, __FILE__, E.getCDetailMsg()); + 
issue_fail_msg("test_move()", __LINE__, __FILE__, E.getCDetailMsg()); } } // test_move @@ -792,7 +792,7 @@ static void test_copy(hid_t fapl_id, hbool_t new_format) } // end of try block catch (Exception& E) { - issue_fail_msg("test_num_links()", __LINE__, __FILE__, E.getCDetailMsg()); + issue_fail_msg("test_copy()", __LINE__, __FILE__, E.getCDetailMsg()); } } // test_copy @@ -892,6 +892,7 @@ void test_links() /* General tests... (on both old & new format groups */ // FileAccPropList may be passed in instead of fapl id test_basic_links(my_fapl_id, new_format); + test_num_links(my_fapl_id, new_format); test_move(my_fapl_id, new_format); test_copy(my_fapl_id, new_format); test_lcpl(my_fapl_id, new_format); diff --git a/c++/test/tobject.cpp b/c++/test/tobject.cpp index 9980ce0..bfc13a0 100644 --- a/c++/test/tobject.cpp +++ b/c++/test/tobject.cpp @@ -523,19 +523,106 @@ static void test_open_object_header() cerr << " in Exception" << endl; issue_fail_msg("test_file_name()", __LINE__, __FILE__, E.getCDetailMsg()); } -} /* test_open_object_header() */ +} // test_open_object_header /*------------------------------------------------------------------------- - * Function: test_objects + * Function: test_getobjectinfo_same_file + * + * Purpose Test that querying the object info for objects in the same + * file will return the same file "number". + * + * Return None + * + * July, 2018 + *------------------------------------------------------------------------- + */ +const H5std_string FILE_OBJINFO("tobject_getinfo.h5"); +const H5std_string GROUP1NAME("group1"); +const H5std_string GROUP2NAME("group2"); +static void test_getobjectinfo_same_file() +{ + H5O_info_t oinfo1, oinfo2; /* Object info structs */ + + // Output message about test being performed + SUBTEST("Group::getInfo"); + + try { + // Create a new HDF5 file + H5File file1(FILE_OBJINFO, H5F_ACC_TRUNC); + + // Create two groups in the file + Group grp1(file1.createGroup(GROUP1NAME)); + Group grp2(file1.createGroup(GROUP2NAME)); + + // Reset object info + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + // Query the info of two groups and verify that they have the same + // file number + grp1.getInfo(oinfo1); + grp2.getInfo(oinfo2); + verify_val(oinfo1.fileno, oinfo2.fileno, "file number from getInfo", __LINE__, __FILE__); + + // Close groups and file + grp1.close(); + grp2.close(); + file1.close(); + + // Open the file twice + file1.openFile(FILE_OBJINFO, H5F_ACC_RDWR); + H5File file2(FILE_OBJINFO, H5F_ACC_RDWR); + + // Create two groups in the file + grp1 = file1.openGroup(GROUP1NAME); + grp2 = file2.openGroup(GROUP2NAME); + + // Reset object info + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + // Query the info of two groups and verify that they have the same + // file number + grp1.getInfo(oinfo1); + grp2.getInfo(oinfo2); + verify_val(oinfo1.fileno, oinfo2.fileno, "file number from getInfo", __LINE__, __FILE__); + + + // Reset object info + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + file1.getInfo(GROUP1NAME, oinfo1); + file1.getInfo(GROUP2NAME, oinfo2); + verify_val(oinfo1.fileno, oinfo2.fileno, "file number from getObjectInfo", __LINE__, __FILE__); + + // Close groups and files + grp1.close(); + grp2.close(); + file1.close(); + file2.close(); + + PASSED(); + } // end of try block + // catch all other exceptions + catch (Exception& E) + { + cerr << " in Exception " << E.getCFuncName() << "detail: " << E.getCDetailMsg() << endl; + 
issue_fail_msg("test_file_name()", __LINE__, __FILE__, E.getCDetailMsg()); + } + +} // test_h5o_getinfo_same_file + +/*------------------------------------------------------------------------- + * Function: test_object * * Purpose Tests HDF5 object related functionality * * Return Success: 0 * Failure: -1 * - * Programmer Binh-Minh Ribler - * Friday, Mar 4, 2014 + * March 4, 2014 *------------------------------------------------------------------------- */ extern "C" @@ -549,8 +636,9 @@ void test_object() test_get_objname_ontypes(); // Test get object name from types test_get_objtype(); // Test get object type test_open_object_header(); // Test object header functions (H5O) + test_getobjectinfo_same_file(); // Test object info in same file -} // test_objects +} // test_object /*------------------------------------------------------------------------- -- cgit v0.12 From 944735221e3dc6545aca753b09041cef7ea09fb1 Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Tue, 17 Jul 2018 01:32:07 -0500 Subject: Fixed comments --- c++/src/H5Object.cpp | 19 +++++++++++++------ c++/src/H5Object.h | 1 + 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/c++/src/H5Object.cpp b/c++/src/H5Object.cpp index 940a7c6..58b34ae 100644 --- a/c++/src/H5Object.cpp +++ b/c++/src/H5Object.cpp @@ -229,8 +229,9 @@ int H5Object::iterateAttrs(attr_operator_t user_op, unsigned *_idx, void *op_dat //-------------------------------------------------------------------------- // Function: H5Object::getInfo ///\brief Returns information about an HDF5 object. -///\return Struct containing the object info -///\exception +///\param objinfo - OUT: Struct containing the object info +///\param fields - IN: Indicates the group of information to be retrieved +/// - default to H5O_INFO_BASIC // July, 2018 //-------------------------------------------------------------------------- void H5Object::getInfo(H5O_info_t& objinfo, unsigned fields) const @@ -247,8 +248,11 @@ void H5Object::getInfo(H5O_info_t& objinfo, unsigned fields) const //-------------------------------------------------------------------------- // Function: H5Object::getInfo ///\brief Returns information about an HDF5 object given its name. -///\return Struct containing the object info -///\exception +///\param name - IN: Name of the object to be queried - \c char * +///\param objinfo - OUT: Struct containing the object info +///\param fields - IN: Indicates the group of information to be retrieved +/// - default to H5O_INFO_BASIC +///\param lapl - IN: Link access property list // July, 2018 //-------------------------------------------------------------------------- void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const @@ -266,8 +270,11 @@ void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, c ///\brief This is an overloaded member function, provided for convenience. /// It differs from the above function in that it takes /// a reference to an \c H5std_string for \a name. 
-///\return Struct containing the object info -///\exception +///\param name - IN: Name of the object to be queried - \c H5std_string +///\param objinfo - OUT: Struct containing the object info +///\param fields - IN: Indicates the group of information to be retrieved +/// - default to H5O_INFO_BASIC +///\param lapl - IN: Link access property list // July, 2018 //-------------------------------------------------------------------------- void H5Object::getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const diff --git a/c++/src/H5Object.h b/c++/src/H5Object.h index 606b0ce..f15c8eb 100644 --- a/c++/src/H5Object.h +++ b/c++/src/H5Object.h @@ -94,6 +94,7 @@ class H5_DLLCPP H5Object : public H5Location { // Returns information about an HDF5 object. void getInfo(H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC) const; + // Returns information about an HDF5 object, given its name. void getInfo(const char* name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; void getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; -- cgit v0.12 From 3aa24435180d10aeca6493f7c1b277cfd5c73fad Mon Sep 17 00:00:00 2001 From: Binh-Minh Ribler Date: Tue, 17 Jul 2018 09:12:10 -0500 Subject: Fixed HDFFV-10458 partially Description: Added wrappers for H5Oget_info_by_idx2. // Returns information about an HDF5 object, given its index. void getInfo(const char* grp_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) void getInfo(const H5std_string& grp_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) Platforms tested: Linux/64 (jelly) Linux/32 (jam) Darwin (osx1010test) --- c++/src/H5Object.cpp | 77 +++++++++++++++++++++++++++++++++++++++++++++++----- c++/src/H5Object.h | 18 ++++++++++-- c++/test/dsets.cpp | 62 ++++++++++++++++++++++++++++++++++++++++++ c++/test/trefer.cpp | 15 ++++++++++ 4 files changed, 163 insertions(+), 9 deletions(-) diff --git a/c++/src/H5Object.cpp b/c++/src/H5Object.cpp index 58b34ae..a33acb3 100644 --- a/c++/src/H5Object.cpp +++ b/c++/src/H5Object.cpp @@ -228,10 +228,17 @@ int H5Object::iterateAttrs(attr_operator_t user_op, unsigned *_idx, void *op_dat //-------------------------------------------------------------------------- // Function: H5Object::getInfo -///\brief Returns information about an HDF5 object. +///\brief Retrieves information about an HDF5 object. 
///\param objinfo - OUT: Struct containing the object info ///\param fields - IN: Indicates the group of information to be retrieved -/// - default to H5O_INFO_BASIC +///\par Description +/// Valid values of \a fields are as follows: +/// \li \c H5O_INFO_BASIC (default) +/// \li \c H5O_INFO_TIME +/// \li \c H5O_INFO_NUM_ATTRS +/// \li \c H5O_INFO_HDR +/// \li \c H5O_INFO_META_SIZE +/// \li \c H5O_INFO_ALL // July, 2018 //-------------------------------------------------------------------------- void H5Object::getInfo(H5O_info_t& objinfo, unsigned fields) const @@ -242,17 +249,25 @@ void H5Object::getInfo(H5O_info_t& objinfo, unsigned fields) const // Throw exception if C API returns failure if (ret_value < 0) - throwException(inMemFunc("getObjinfo"), "H5Oget_info2 failed"); + throwException(inMemFunc("getInfo"), "H5Oget_info2 failed"); } //-------------------------------------------------------------------------- // Function: H5Object::getInfo -///\brief Returns information about an HDF5 object given its name. +///\brief Retrieves information about an HDF5 object given its name. ///\param name - IN: Name of the object to be queried - \c char * ///\param objinfo - OUT: Struct containing the object info ///\param fields - IN: Indicates the group of information to be retrieved /// - default to H5O_INFO_BASIC ///\param lapl - IN: Link access property list +///\par Description +/// Valid values of \a fields are as follows: +/// \li \c H5O_INFO_BASIC (default) +/// \li \c H5O_INFO_TIME +/// \li \c H5O_INFO_NUM_ATTRS +/// \li \c H5O_INFO_HDR +/// \li \c H5O_INFO_META_SIZE +/// \li \c H5O_INFO_ALL // July, 2018 //-------------------------------------------------------------------------- void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const @@ -262,7 +277,7 @@ void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, c // Throw exception if C API returns failure if (ret_value < 0) - throwException(inMemFunc("getObjinfo"), "H5Oget_info_by_name2 failed"); + throwException(inMemFunc("getInfo"), "H5Oget_info_by_name2 failed"); } //-------------------------------------------------------------------------- @@ -279,12 +294,60 @@ void H5Object::getInfo(const char* name, H5O_info_t& objinfo, unsigned fields, c //-------------------------------------------------------------------------- void H5Object::getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields, const LinkAccPropList& lapl) const { + getInfo(name.c_str(), objinfo, fields, lapl); +} + +//-------------------------------------------------------------------------- +// Function: H5Object::getInfo +///\brief Retrieves information about an HDF5 object given its index. 
+///\param grp_name - IN: Group name where the object belongs - \c char *
+///\param idx_type - IN: Type of index
+///\param order - IN: Order to traverse
+///\param idx - IN: Object position
+///\param objinfo - OUT: Struct containing the object info
+///\param fields - IN: Indicates the group of information to be retrieved
+///             - default to H5O_INFO_BASIC
+///\param lapl - IN: Link access property list
+///\par Description
+///     Valid values of \a fields are as follows:
+///     \li \c H5O_INFO_BASIC (default)
+///     \li \c H5O_INFO_TIME
+///     \li \c H5O_INFO_NUM_ATTRS
+///     \li \c H5O_INFO_HDR
+///     \li \c H5O_INFO_META_SIZE
+///     \li \c H5O_INFO_ALL
+// July, 2018
+//--------------------------------------------------------------------------
+void H5Object::getInfo(const char* grp_name, H5_index_t idx_type,
+    H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, unsigned fields,
+    const LinkAccPropList& lapl) const
+{
     // Use C API to get information of the object
-    herr_t ret_value = H5Oget_info_by_name2(getId(), name.c_str(), &objinfo, fields, lapl.getId());
+    herr_t ret_value = H5Oget_info_by_idx2(getId(), grp_name, idx_type, order,
+        idx, &objinfo, fields, lapl.getId());

     // Throw exception if C API returns failure
     if (ret_value < 0)
-        throwException(inMemFunc("getObjinfo"), "H5Oget_info_by_name2 failed");
+        throwException(inMemFunc("getInfo"), "H5Oget_info_by_idx2 failed");
+}
+
+//--------------------------------------------------------------------------
+// Function: H5Object::getInfo
+///\brief This is an overloaded member function, provided for convenience.
+///       It differs from the above function in that it takes
+///       a reference to an \c H5std_string for \a grp_name.
+///\param grp_name - IN: Group name where the object belongs - \c H5std_string
+///\param idx_type - IN: Type of index
+///\param order - IN: Order to traverse
+///\param idx - IN: Object position
+///\param objinfo - OUT: Struct containing the object info
+///\param fields - IN: Indicates the group of information to be retrieved
+///             - default to H5O_INFO_BASIC
+///\param lapl - IN: Link access property list
+// July, 2018
+//--------------------------------------------------------------------------
+void H5Object::getInfo(const H5std_string& grp_name, H5_index_t idx_type,
+    H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, unsigned fields,
+    const LinkAccPropList& lapl) const
+{
+    getInfo(grp_name.c_str(), idx_type, order, idx, objinfo, fields, lapl);
 }

 //--------------------------------------------------------------------------
diff --git a/c++/src/H5Object.h b/c++/src/H5Object.h
index f15c8eb..268fe58 100644
--- a/c++/src/H5Object.h
+++ b/c++/src/H5Object.h
@@ -95,8 +95,22 @@ class H5_DLLCPP H5Object : public H5Location {
     // Returns information about an HDF5 object.
     void getInfo(H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC) const;
     // Returns information about an HDF5 object, given its name.
-    void getInfo(const char* name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
-    void getInfo(const H5std_string& name, H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
+    void getInfo(const char* name, H5O_info_t& objinfo,
+        unsigned fields = H5O_INFO_BASIC,
+        const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
+    void getInfo(const H5std_string& name, H5O_info_t& objinfo,
+        unsigned fields = H5O_INFO_BASIC,
+        const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
+
+    // Retrieves information about an HDF5 object, given its index.
+    void getInfo(const char* grp_name, H5_index_t idx_type,
+        H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo,
+        unsigned fields = H5O_INFO_BASIC,
+        const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
+    void getInfo(const H5std_string& grp_name, H5_index_t idx_type,
+        H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo,
+        unsigned fields = H5O_INFO_BASIC,
+        const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;

     // Gets the name of this HDF5 object, i.e., Group, DataSet, or
     // DataType.
diff --git a/c++/test/dsets.cpp b/c++/test/dsets.cpp
index 250ce90..e86052b 100644
--- a/c++/test/dsets.cpp
+++ b/c++/test/dsets.cpp
@@ -1116,6 +1116,67 @@ static herr_t test_types(H5File& file)

 /*-------------------------------------------------------------------------
+ * Function: test_getinfo
+ *
+ * Purpose Tests getInfo()
+ *
+ * Return Success: 0
+ *
+ *        Failure: -1
+ *
+ * July, 2018
+ *-------------------------------------------------------------------------
+ */
+static herr_t test_getinfo(H5File& file)
+{
+    SUBTEST("Getting object information");
+
+    try {
+        // Open the chunked dataset created by an earlier test in this file
+        DataSet dataset(file.openDataSet(DSET_CHUNKED_NAME));
+
+        // Get dataset header info
+        H5O_info_t oinfo;
+        HDmemset(&oinfo, 0, sizeof(oinfo));
+        dataset.getInfo(oinfo, H5O_INFO_HDR);
+        verify_val(oinfo.hdr.nchunks, 1, "DataSet::getInfo", __LINE__, __FILE__);
+        dataset.close();
+
+        // Open the default (contiguous) dataset created by an earlier test
+        // and verify its header info as well, this time requesting all fields
+        dataset = file.openDataSet(DSET_DEFAULT_NAME);
+        HDmemset(&oinfo, 0, sizeof(oinfo));
+        dataset.getInfo(oinfo, H5O_INFO_ALL);
+        verify_val(oinfo.hdr.nchunks, 1, "DataSet::getInfo", __LINE__, __FILE__);
+        dataset.close();
+
+        PASSED();
+        return 0;
+    } // outermost try block
+
+    catch (InvalidActionException& E)
+    {
+        cerr << " FAILED" << endl;
+        cerr << "    <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+        return -1;
+    }
+    // catch all other exceptions
+    catch (Exception& E)
+    {
+        issue_fail_msg("test_getinfo", __LINE__, __FILE__);
+        return -1;
+    }
+} // test_getinfo
+
+
+/*-------------------------------------------------------------------------
 * Function: test_virtual
 *
 * Purpose Tests fixed, unlimited, and printf selections in the same
@@ -1237,6 +1298,7 @@ void test_dset()
     nerrors += test_create(file) < 0 ? 1:0;
     nerrors += test_simple_io(file) < 0 ? 1:0;
+    nerrors += test_getinfo(file) < 0 ? 1:0;
     nerrors += test_tconv(file) < 0 ? 1:0;
     nerrors += test_compression(file) < 0 ? 1:0;
     nerrors += test_nbit_compression(file) < 0 ?
1:0;
diff --git a/c++/test/trefer.cpp b/c++/test/trefer.cpp
index bb09616..fa214df 100644
--- a/c++/test/trefer.cpp
+++ b/c++/test/trefer.cpp
@@ -481,6 +481,21 @@ static void test_reference_group()
     fname = group.getFileName();
     verify_val(fname, FILE1, "H5Group::getFileName",__LINE__,__FILE__);

+    // Check object type using Group::getInfo()
+    H5O_info_t oinfo;
+    HDmemset(&oinfo, 0, sizeof(oinfo));
+    group.getInfo(".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, oinfo);
+    verify_val(oinfo.type, H5O_TYPE_DATASET, "Group::getInfo",__LINE__,__FILE__);
+
+    // Check for an out-of-bounds query by index
+    try {
+        HDmemset(&oinfo, 0, sizeof(oinfo));
+        group.getInfo(".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)9, oinfo);
+
+        // Should FAIL but didn't, so throw an invalid action exception
+        throw InvalidActionException("Group::getInfo", "Out-of-bounds index.");
+    // Catch only the exception getInfo throws, so that the
+    // InvalidActionException above propagates and fails the test
+    } catch (GroupIException& err) {} // do nothing, failure expected
+
     // Unlink one of the objects in the dereferenced group, and re-check
     refgroup.unlink(GROUPNAME2);
     nobjs = refgroup.getNumObjs();
--
cgit v0.12
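
Taken together, the patches above give H5Object three flavors of getInfo(): directly on an open object, by name relative to the object, and by index within a group. The following is a minimal, self-contained sketch of how application code might exercise all three; it is not part of the patch series, and the file name "getinfo_example.h5", the group "/data", and the dataset "dset1" are invented purely for illustration.

    #include <iostream>
    #include <cstring>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            // Create a small example file with one group and one dataset
            H5File file("getinfo_example.h5", H5F_ACC_TRUNC);
            Group grp(file.createGroup("/data"));

            hsize_t dims[1] = {10};
            DataSpace space(1, dims);
            DataSet dset(grp.createDataSet("dset1", PredType::NATIVE_INT, space));

            H5O_info_t oinfo;

            // 1) Directly on the object; only basic fields by default
            std::memset(&oinfo, 0, sizeof(oinfo));
            dset.getInfo(oinfo);

            // 2) By name, relative to the group, also requesting header info
            std::memset(&oinfo, 0, sizeof(oinfo));
            grp.getInfo("dset1", oinfo, H5O_INFO_BASIC | H5O_INFO_HDR);

            // 3) By index: first object in the group, in increasing name order
            std::memset(&oinfo, 0, sizeof(oinfo));
            grp.getInfo(".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, oinfo,
                        H5O_INFO_ALL);

            std::cout << "object type: " << oinfo.type
                      << ", header chunks: " << oinfo.hdr.nchunks << std::endl;
        }
        catch (Exception& e) {
            std::cerr << "Error: " << e.getCDetailMsg() << std::endl;
            return -1;
        }
        return 0;
    }

Because the H5O_INFO_* values are bit flags, the fields argument can combine them with bitwise OR, as in case 2 above; H5O_INFO_ALL requests every group of information at once.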