author | Jordan Henderson <jhenderson@hdfgroup.org> | 2017-08-08 20:04:53 (GMT) |
---|---|---|
committer | Jordan Henderson <jhenderson@hdfgroup.org> | 2017-08-08 20:04:53 (GMT) |
commit | f74dd2adaeb668e1164ae896b7155e9491035ef2 (patch) |
tree | 80ed458737607fbfdee6516dd6fa83122967df3f /testpar | |
parent | d5a70edb5036c6b75ca27ad6b3895d6227d9e5d1 (diff) | |
Start adding data verification
Diffstat (limited to 'testpar')
-rw-r--r-- | testpar/t_filters_parallel.c | 161
-rw-r--r-- | testpar/t_filters_parallel.h | 78
2 files changed, 193 insertions, 46 deletions
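
The additions below repeat one idiom in each test: after the collective H5Dwrite, reopen the dataset, read the full extent back through the same transfer property list, and memcmp the result against an expected buffer computed identically on every rank. A minimal sketch of that idiom as a hypothetical helper (the patch itself inlines this logic per test; `verify_written_data` is not from the patch, and `VRFY` here is a stand-in for the testpar harness macro):

```c
/* Sketch of the read-back verification pattern this patch adds to each test.
 * Assumes a collective-transfer DXPL and an expected buffer computed
 * rank-independently. Hypothetical helper; the real tests inline this. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include "hdf5.h"

#ifndef VRFY /* stand-in for the testpar harness macro */
#define VRFY(cond, msg)                               \
    do {                                              \
        if (!(cond)) {                                \
            fprintf(stderr, "FAILED: %s\n", msg);     \
            MPI_Abort(MPI_COMM_WORLD, 1);             \
        }                                             \
    } while (0)
#endif

static void
verify_written_data(hid_t file_id, const char *dset_name, hid_t dxpl_id,
                    const long *expected, size_t buf_size)
{
    long *read_buf = (long *) malloc(buf_size);
    hid_t dset_id;

    VRFY((NULL != read_buf), "malloc succeeded");

    /* Reopen the dataset so the read exercises the full I/O path */
    dset_id = H5Dopen(file_id, dset_name, H5P_DEFAULT);
    VRFY((dset_id >= 0), "Dataset open succeeded");

    /* Every rank reads the entire extent (H5S_ALL in both file and memory) */
    VRFY((H5Dread(dset_id, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
         "Dataset read succeeded");

    /* Byte-wise comparison against the rank-independently computed values */
    VRFY((0 == memcmp(read_buf, expected, buf_size)), "Data verification succeeded");

    free(read_buf);
    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
}
```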
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 2dc209c..37a94bb 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -89,10 +89,13 @@ static void (*tests[])(void) = {
  * Programmer: Jordan Henderson
  *             02/01/2017
  */
+/* XXX: DONE */
 static void
 test_one_chunk_filtered_dataset(void)
 {
     C_DATATYPE *data = NULL;
+    C_DATATYPE *read_buf = NULL;
+    C_DATATYPE *correct_buf = NULL;
     hsize_t     dataset_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
     hsize_t     chunk_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
     hsize_t     sel_dims[ONE_CHUNK_FILTERED_DATASET_DIMS];
@@ -100,11 +103,11 @@ test_one_chunk_filtered_dataset(void)
     hsize_t     stride[ONE_CHUNK_FILTERED_DATASET_DIMS];
     hsize_t     block[ONE_CHUNK_FILTERED_DATASET_DIMS];
     hsize_t     offset[ONE_CHUNK_FILTERED_DATASET_DIMS];
-    size_t      i, data_size;
+    size_t      i, data_size, correct_buf_size;
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("one-chunk filtered dataset");
+    if (MAINPROCESS) puts("Testing one-chunk filtered dataset");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -174,12 +177,20 @@ test_one_chunk_filtered_dataset(void)
     /* Fill data buffer */
     data_size = ONE_CHUNK_FILTERED_DATASET_CH_NROWS * ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
+    correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);

     data = (C_DATATYPE *) malloc(data_size);
     VRFY((NULL != data), "malloc succeeded");

+    correct_buf = (C_DATATYPE *) malloc(correct_buf_size);
+    VRFY((NULL != correct_buf), "malloc succeeded");
+
     for (i = 0; i < data_size / sizeof(*data); i++)
-        data[i] = GEN_DATA(i);
+        data[i] = (C_DATATYPE) GEN_DATA(i);
+
+    for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+        correct_buf[i] = ((C_DATATYPE) i % (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / NUM_MPI_RANKS * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
+                       + ((C_DATATYPE) i / (ONE_CHUNK_FILTERED_DATASET_CH_NROWS / NUM_MPI_RANKS * ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));

     /* Create property list for collective dataset write */
     plist_id = H5Pcreate(H5P_DATASET_XFER);
@@ -192,6 +203,22 @@ test_one_chunk_filtered_dataset(void)
     if (data) free(data);

     VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+    /* Verify the correct data was written */
+    read_buf = (C_DATATYPE *) malloc(correct_buf_size);
+    VRFY((NULL != read_buf), "malloc succeeded");
+
+    dset_id = H5Dopen(file_id, "/" ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "Dataset open succeeded");
+
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+
+    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+    if (correct_buf) free(correct_buf);
+    if (read_buf) free(read_buf);
+
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
     VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
     VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
     VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
@@ -224,7 +251,7 @@ test_filtered_dataset_no_overlap(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to unshared filtered chunks");
+    if (MAINPROCESS) puts("Testing write to unshared filtered chunks");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -344,7 +371,7 @@ test_filtered_dataset_overlap(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to shared filtered chunks");
+    if (MAINPROCESS) puts("Testing write to shared filtered chunks");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -466,7 +493,7 @@ test_filtered_dataset_single_no_selection(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to filtered chunks with a single process having no selection");
+    if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -591,7 +618,7 @@ test_filtered_dataset_all_no_selection(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to filtered chunks with all processes having no selection");
+    if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -675,19 +702,22 @@ test_filtered_dataset_all_no_selection(void)
  * Programmer: Jordan Henderson
  *             02/02/2017
  */
+/* XXX: DONE */
 static void
 test_filtered_dataset_point_selection(void)
 {
     C_DATATYPE *data = NULL;
+    C_DATATYPE *correct_buf = NULL;
+    C_DATATYPE *read_buf = NULL;
     hsize_t     coords[2 * POINT_SELECTION_FILTERED_CHUNKS_NUM_CHUNKS][POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
     hsize_t     dataset_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
     hsize_t     chunk_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
     hsize_t     sel_dims[POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
-    size_t      i, j, data_size;
+    size_t      i, j, data_size, correct_buf_size;
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to filtered chunks with point selection");
+    if (MAINPROCESS) puts("Testing write to filtered chunks with point selection");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -749,13 +779,20 @@ test_filtered_dataset_point_selection(void)
     /* Fill data buffer */
     data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+    correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);

     data = (C_DATATYPE *) malloc(data_size);
     VRFY((NULL != data), "malloc succeeded");

+    correct_buf = (C_DATATYPE *) malloc(correct_buf_size);
+    VRFY((NULL != correct_buf), "malloc succeeded");
+
     for (i = 0; i < data_size / sizeof(*data); i++)
         data[i] = GEN_DATA(i);

+    for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+        correct_buf[i] = (dataset_dims[1] * (i / (NUM_MPI_RANKS * dataset_dims[1]))) + (i % dataset_dims[1]) + (((i % (NUM_MPI_RANKS * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]);
+
     /* Create property list for collective dataset write */
     plist_id = H5Pcreate(H5P_DATASET_XFER);
     VRFY((plist_id >= 0), "DXPL creation succeeded");
@@ -767,6 +804,22 @@ test_filtered_dataset_point_selection(void)
     if (data) free(data);

     VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+    /* Verify the correct data was written */
+    read_buf = (C_DATATYPE *) malloc(correct_buf_size);
+    VRFY((NULL != read_buf), "malloc succeeded");
+
+    dset_id = H5Dopen(file_id, "/" POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "Dataset open succeeded");
+
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+
+    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+    if (correct_buf) free(correct_buf);
+    if (read_buf) free(read_buf);
+
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
     VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
     VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
     VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
@@ -801,7 +854,7 @@ test_filtered_dataset_interleaved_write(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("interleaved write to filtered chunks");
+    if (MAINPROCESS) puts("Testing interleaved write to filtered chunks");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -919,7 +972,7 @@ test_3d_filtered_dataset_no_overlap_separate_pages(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to unshared filtered chunks on separate pages in 3D dataset");
+    if (MAINPROCESS) puts("Testing write to unshared filtered chunks on separate pages in 3D dataset");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1045,7 +1098,7 @@ test_3d_filtered_dataset_no_overlap_same_pages(void)
     hid_t       file_id, dset_id, plist_id;
     hid_t       filespace, memspace;

-    if (MAINPROCESS) TESTING("write to unshared filtered chunks on the same pages in 3D dataset");
+    if (MAINPROCESS) puts("Testing write to unshared filtered chunks on the same pages in 3D dataset");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1171,7 +1224,7 @@ test_3d_filtered_dataset_overlap(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to shared filtered chunks in 3D dataset");
+    if (MAINPROCESS) puts("Testing write to shared filtered chunks in 3D dataset");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1314,7 +1367,7 @@ test_cmpd_filtered_dataset_no_conversion_unshared(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+    if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1436,7 +1489,7 @@ test_cmpd_filtered_dataset_no_conversion_shared(void)
     hid_t       file_id, dset_id, plist_id, memtype;
     hid_t       filespace, memspace;

-    if (MAINPROCESS) TESTING("write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+    if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1563,7 +1616,7 @@ test_cmpd_filtered_dataset_type_conversion_unshared(void)
     hid_t       file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
     hid_t       filespace = -1, memspace = -1;

-    if (MAINPROCESS) TESTING("write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+    if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1703,7 +1756,7 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
     hid_t       file_id, dset_id, plist_id, filetype, memtype;
     hid_t       filespace, memspace;

-    if (MAINPROCESS) TESTING("write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+    if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");

     /* Set up file access property list with parallel I/O access */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1828,10 +1881,81 @@ test_cmpd_filtered_dataset_type_conversion_shared(void)
 static void
 test_write_serial_read_parallel(void)
 {
+    C_DATATYPE *data = NULL;
+    hsize_t     dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     sel_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     count[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     stride[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     block[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    hsize_t     offset[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+    size_t      i, j, k, data_size;
+    hid_t       file_id = -1, dset_id = -1, plist_id = -1;
+    hid_t       filespace = -1, memspace = -1;
+
+    if (MAINPROCESS) puts("Testing write file serially; read file in parallel");
+
+    /* Write the file on the MAINPROCESS rank */
     if (MAINPROCESS) {
+        /* Set up file access property list */
+        plist_id = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+        VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded");
+
+        file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+        VRFY((file_id >= 0), "Test file open succeeded");
+
+        VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+        /* Create the dataspace for the dataset */
+        dataset_dims[0] = WRITE_SERIAL_READ_PARALLEL_NROWS;
+        dataset_dims[1] = WRITE_SERIAL_READ_PARALLEL_NCOLS;
+        dataset_dims[2] = WRITE_SERIAL_READ_PARALLEL_DEPTH;
+        chunk_dims[0] = WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
+        chunk_dims[1] = WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
+        chunk_dims[2] = 1;
+        sel_dims[0] = WRITE_SERIAL_READ_PARALLEL_NROWS;
+        sel_dims[1] = WRITE_SERIAL_READ_PARALLEL_NCOLS;
+        sel_dims[2] = 1;
+
+        filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL);
+        VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+        /* Create chunked dataset */
+        plist_id = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+        VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+        /* Add test filter to the pipeline */
+        VRFY((SET_FILTER(plist_id) >= 0), "Filter set");
+
+        dset_id = H5Dcreate(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+                H5P_DEFAULT, plist_id, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+        VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+        VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+        data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
+
+        data = (C_DATATYPE *) malloc(data_size);
+        VRFY((NULL != data), "malloc succeeded");
+
+        for (i = 0; i < data_size / sizeof(*data); i++)
+            data[i] = GEN_DATA(i);
+
+        VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), "Dataset write succeeded");
+
+        if (data) free(data);
+
+        VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+        VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
     }

+    /* All ranks open the file and verify their "portion" of the dataset is correct */
+
     return;
 }
@@ -1927,7 +2051,8 @@ exit:
     ALARM_OFF;

-    h5_clean_files(FILENAME, fapl);
+    /* XXX: Commented for testing */
+    /* h5_clean_files(FILENAME, fapl); */

     H5close();
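
The closed-form expected values above (e.g. in the one-chunk test) can be sanity-checked without MPI or HDF5. A hypothetical standalone check, assuming a fixed four ranks and that each rank fills its contiguous block of rows with GEN_DATA(i) = (mpi_rank + i), indexed over its own selection:

```c
/* Hypothetical standalone check (no MPI) that the one-chunk test's closed-form
 * expected values match what the ranks write: rank r writes the r-th
 * contiguous block of rows with data[i] = GEN_DATA(i) = (mpi_rank + i). */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_MPI_RANKS 4                          /* assumption for this sketch */
#define NROWS (NUM_MPI_RANKS * 4)
#define NCOLS (NUM_MPI_RANKS * 2)
#define PER_RANK (NROWS / NUM_MPI_RANKS * NCOLS) /* elements one rank writes */

int
main(void)
{
    long   written[NROWS * NCOLS];
    size_t g;
    int    r;

    /* Simulate each rank's contiguous row-block write */
    for (r = 0; r < NUM_MPI_RANKS; r++)
        for (g = 0; g < PER_RANK; g++)
            written[(size_t) r * PER_RANK + g] = r + (long) g;

    /* Closed form from the patch: local index plus owning rank */
    for (g = 0; g < (size_t) (NROWS * NCOLS); g++)
        assert(written[g] == (long) (g % PER_RANK) + (long) (g / PER_RANK));

    puts("one-chunk expected-value formula checks out");
    return 0;
}
```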
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index d627c27..d098b66 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -46,9 +46,14 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
 #define C_DATATYPE long
 #define C_DATATYPE_STR(type) STRINGIFY(type)
 #define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
-#define GEN_DATA(i) RANK_DATA(i) /* Given an index value i, generates test data based upon selected mode */
-#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
-#define INCREMENTAL_DATA(i) i /* Generates incremental test data */
+
+#define GEN_DATA(i) INCREMENTAL_DATA(i)
+#define INCREMENTAL_DATA(i) (mpi_rank + i) /* Generates incremental test data */
+
+/* XXX: For experimental purposes only; will cause tests to fail the data verification phase */
+/* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon selected mode */
+#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
+
 #ifdef DYNAMIC_FILTER
 #define SET_FILTER(dcpl) H5Pset_filter(dcpl, filter_id, flags, FILTER_NUM_CDVALUES, cd_values) /* Test other filter in parallel */
 #else
@@ -58,50 +63,49 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
 /* Defines for the one-chunk filtered dataset test */
 #define ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset"
 #define ONE_CHUNK_FILTERED_DATASET_DIMS 2
-#define ONE_CHUNK_FILTERED_DATASET_NROWS 16 /* Must be an even multiple of the number of ranks to avoid issues */
-#define ONE_CHUNK_FILTERED_DATASET_NCOLS 8 /* Must be an even multiple of the number of ranks to avoid issues */
+#define ONE_CHUNK_FILTERED_DATASET_NROWS (NUM_MPI_RANKS * 4) /* Must be an even multiple of the number of ranks to avoid issues */
+#define ONE_CHUNK_FILTERED_DATASET_NCOLS (NUM_MPI_RANKS * 2) /* Must be an even multiple of the number of ranks to avoid issues */
 #define ONE_CHUNK_FILTERED_DATASET_CH_NROWS ONE_CHUNK_FILTERED_DATASET_NROWS
 #define ONE_CHUNK_FILTERED_DATASET_CH_NCOLS ONE_CHUNK_FILTERED_DATASET_NCOLS

 /* Defines for the unshared filtered chunks write test */
 #define UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks"
 #define UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
-#define UNSHARED_FILTERED_CHUNKS_NROWS 16
-#define UNSHARED_FILTERED_CHUNKS_NCOLS 8
+#define UNSHARED_FILTERED_CHUNKS_NROWS (NUM_MPI_RANKS * 4)
+#define UNSHARED_FILTERED_CHUNKS_NCOLS (NUM_MPI_RANKS * 2)
 #define UNSHARED_FILTERED_CHUNKS_CH_NROWS (UNSHARED_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
 #define UNSHARED_FILTERED_CHUNKS_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)

 /* Defines for the shared filtered chunks write test */
 #define SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks"
 #define SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
-#define SHARED_FILTERED_CHUNKS_NROWS 16
-#define SHARED_FILTERED_CHUNKS_NCOLS 8
+#define SHARED_FILTERED_CHUNKS_NROWS (NUM_MPI_RANKS * 4)
+#define SHARED_FILTERED_CHUNKS_NCOLS (NUM_MPI_RANKS * 2)
 #define SHARED_FILTERED_CHUNKS_CH_NROWS (SHARED_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
 #define SHARED_FILTERED_CHUNKS_CH_NCOLS (SHARED_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)

 /* Defines for the filtered chunks write test where a process has no selection */
 #define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks"
 #define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS 16
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS 8
+#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (NUM_MPI_RANKS * 4)
+#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (NUM_MPI_RANKS * 2)
 #define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
 #define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)
-#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC 3
+#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC NUM_MPI_RANKS - 1

 /* Defines for the filtered chunks write test where no process has a selection */
 #define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks"
 #define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS 16
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS 8
+#define ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (NUM_MPI_RANKS * 4)
+#define ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (NUM_MPI_RANKS * 2)
 #define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
 #define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)
-#define ALL_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC 3

 /* Defines for the filtered chunks write test with a point selection */
 #define POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks"
 #define POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define POINT_SELECTION_FILTERED_CHUNKS_NROWS 16
-#define POINT_SELECTION_FILTERED_CHUNKS_NCOLS 8
+#define POINT_SELECTION_FILTERED_CHUNKS_NROWS (NUM_MPI_RANKS * 4)
+#define POINT_SELECTION_FILTERED_CHUNKS_NCOLS (NUM_MPI_RANKS * 2)
 #define POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (POINT_SELECTION_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
 #define POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (POINT_SELECTION_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)
 #define POINT_SELECTION_FILTERED_CHUNKS_NUM_CHUNKS ((POINT_SELECTION_FILTERED_CHUNKS_NROWS / POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS) * (POINT_SELECTION_FILTERED_CHUNKS_NCOLS / POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS))
@@ -109,8 +113,8 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
 /* Defines for the filtered dataset interleaved write test */
 #define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "interleaved_write_filtered_dataset"
 #define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS 16 /* Must be an even multiple of the number of ranks to avoid issues */
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS 8 /* Must be an even multiple of the number of ranks to avoid issues */
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS (NUM_MPI_RANKS * 4) /* Must be an even multiple of the number of ranks to avoid issues */
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (NUM_MPI_RANKS * 2) /* Must be an even multiple of the number of ranks to avoid issues */
 #define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / NUM_MPI_RANKS)
 #define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS / NUM_MPI_RANKS)
 #define INTERLEAVED_WRITE_FILTERED_DATASET_SEL_NPOINTS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS / INTERLEAVED_WRITE_FILTERED_DATASET_NUM_RANKS)

 /* Defines for the 3D unshared filtered dataset separate page write test */
 #define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3d_unshared_filtered_chunks_separate_pages"
 #define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS 16
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS 8
-#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH NUM_MPI_RANKS
+#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (NUM_MPI_RANKS * 4)
+#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (NUM_MPI_RANKS * 2)
+#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (NUM_MPI_RANKS)
 #define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / NUM_MPI_RANKS)
 #define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / NUM_MPI_RANKS)

 /* Defines for the 3D unshared filtered dataset same page write test */
 #define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3d_unshared_filtered_chunks_same_pages"
 #define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS 16
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS 8
-#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH NUM_MPI_RANKS
+#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (NUM_MPI_RANKS * 4)
+#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (NUM_MPI_RANKS * 2)
+#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (NUM_MPI_RANKS)
 #define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / NUM_MPI_RANKS)
 #define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / NUM_MPI_RANKS)

 /* Defines for the 3D shared filtered dataset write test */
 #define SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3d_shared_filtered_chunks"
 #define SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
-#define SHARED_FILTERED_CHUNKS_3D_NROWS 16
-#define SHARED_FILTERED_CHUNKS_3D_NCOLS 8
-#define SHARED_FILTERED_CHUNKS_3D_DEPTH NUM_MPI_RANKS
+#define SHARED_FILTERED_CHUNKS_3D_NROWS (NUM_MPI_RANKS * 4)
+#define SHARED_FILTERED_CHUNKS_3D_NCOLS (NUM_MPI_RANKS * 2)
+#define SHARED_FILTERED_CHUNKS_3D_DEPTH (NUM_MPI_RANKS)
 #define SHARED_FILTERED_CHUNKS_3D_CH_NROWS (SHARED_FILTERED_CHUNKS_3D_NROWS / NUM_MPI_RANKS)
 #define SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (SHARED_FILTERED_CHUNKS_3D_NCOLS / NUM_MPI_RANKS)
@@ -186,4 +190,22 @@ typedef struct {
 #define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
 #define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS

+/* Defines for the write file serially/read in parallel test */
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3
+#define WRITE_SERIAL_READ_PARALLEL_NROWS (NUM_MPI_RANKS * 4)
+#define WRITE_SERIAL_READ_PARALLEL_NCOLS (NUM_MPI_RANKS * 2)
+#define WRITE_SERIAL_READ_PARALLEL_DEPTH (NUM_MPI_RANKS)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NROWS (WRITE_SERIAL_READ_PARALLEL_NROWS / NUM_MPI_RANKS)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NCOLS (WRITE_SERIAL_READ_PARALLEL_NCOLS / NUM_MPI_RANKS)
+
+/* Defines for the write file in parallel/read serially test */
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_NAME "write_parallel_read_serial"
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS 3
+#define WRITE_PARALLEL_READ_SERIAL_NROWS (NUM_MPI_RANKS * 4)
+#define WRITE_PARALLEL_READ_SERIAL_NCOLS (NUM_MPI_RANKS * 2)
+#define WRITE_PARALLEL_READ_SERIAL_DEPTH (NUM_MPI_RANKS)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / NUM_MPI_RANKS)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / NUM_MPI_RANKS)
+
 #endif /* TEST_PARALLEL_FILTERS_H_ */
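
A closing note on the header rewrite: every dataset extent is now expressed in terms of NUM_MPI_RANKS, so the derived chunk dimensions divide evenly at any rank count, instead of only at counts that divide the old fixed 16 and 8. A minimal sketch of that invariant, reusing the patch's macro shapes and assuming NUM_MPI_RANKS is fixed at compile time for the sketch:

```c
/* Sketch: with extents scaled by NUM_MPI_RANKS, the per-rank chunk dims
 * always multiply back to the full extent; fixed 16x8 extents break this
 * whenever the rank count does not divide 16 and 8 (e.g. 3 ranks). */
#include <assert.h>
#include <stdio.h>

#ifndef NUM_MPI_RANKS
#define NUM_MPI_RANKS 4 /* assumption for this sketch; set per test run */
#endif

#define UNSHARED_FILTERED_CHUNKS_NROWS    (NUM_MPI_RANKS * 4)
#define UNSHARED_FILTERED_CHUNKS_NCOLS    (NUM_MPI_RANKS * 2)
#define UNSHARED_FILTERED_CHUNKS_CH_NROWS (UNSHARED_FILTERED_CHUNKS_NROWS / NUM_MPI_RANKS)
#define UNSHARED_FILTERED_CHUNKS_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_NCOLS / NUM_MPI_RANKS)

int
main(void)
{
    /* Integer division loses nothing: the chunks tile the dataset exactly */
    assert(UNSHARED_FILTERED_CHUNKS_CH_NROWS * NUM_MPI_RANKS == UNSHARED_FILTERED_CHUNKS_NROWS);
    assert(UNSHARED_FILTERED_CHUNKS_CH_NCOLS * NUM_MPI_RANKS == UNSHARED_FILTERED_CHUNKS_NCOLS);

    printf("%d ranks: dataset %dx%d, per-rank chunks %dx%d\n", NUM_MPI_RANKS,
           UNSHARED_FILTERED_CHUNKS_NROWS, UNSHARED_FILTERED_CHUNKS_NCOLS,
           UNSHARED_FILTERED_CHUNKS_CH_NROWS, UNSHARED_FILTERED_CHUNKS_CH_NCOLS);
    return 0;
}
```

The WRITE_PARALLEL_READ_SERIAL defines land here ahead of their test body; this commit only stubs out the serial-write/parallel-read counterpart, consistent with the "Start adding data verification" subject.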