Diffstat (limited to 'testpar')
-rw-r--r--   testpar/Makefile.am     |    2
-rw-r--r--   testpar/t_cache_image.c | 2161
-rw-r--r--   testpar/t_file.c        |  615
-rw-r--r--   testpar/t_mdset.c       |    6
-rw-r--r--   testpar/testphdf5.c     |    5
-rw-r--r--   testpar/testphdf5.h     |    1
6 files changed, 2784 insertions, 6 deletions
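Most of this change is the new parallel test testpar/t_cache_image.c, whose helper open_hdf5_file() drives the metadata cache image feature through the file access property list before creating or opening the test file. The following is a minimal standalone sketch of that configuration step using only public HDF5 1.10 API calls; it is illustrative only, and the file name "ci_example.h5" is a placeholder that does not appear in the patch.

#include "hdf5.h"
#include <stdio.h>

/* Sketch: create a file whose metadata cache image is generated on close,
 * mirroring the FAPL setup performed by open_hdf5_file() in the new test.
 */
int
main(void)
{
    hid_t                     fapl_id, file_id;
    H5AC_cache_image_config_t config;

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    if (fapl_id < 0)
        return 1;

    /* As in the test, request the latest file format before enabling the
     * cache image feature.
     */
    if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        return 1;

    /* Fill in the cache image configuration and attach it to the FAPL */
    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = 1; /* TRUE  */
    config.save_resize_status = 0; /* FALSE */
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
    if (H5Pset_mdc_image_config(fapl_id, &config) < 0)
        return 1;

    /* Create the file; a cache image is written when the file is closed */
    file_id = H5Fcreate("ci_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    if (file_id < 0)
        return 1;

    if (H5Fclose(file_id) < 0 || H5Pclose(fapl_id) < 0)
        return 1;

    printf("cache image FAPL configured and file written\n");
    return 0;
}

In the patch itself this setup runs only in the serial "poor man's parallel" step: main() on rank 0 forks and re-executes t_cache_image with the "setup" argument to build the cache-image file serially, and the MPI ranks then open that file to verify the image is read correctly.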
diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 4fe0ba8..3889630 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -25,7 +25,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test # Test programs. These are our main targets. # -TEST_PROG_PARA=t_mpi testphdf5 t_cache t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame +TEST_PROG_PARA=t_mpi testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame check_PROGRAMS = $(TEST_PROG_PARA) diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c new file mode 100644 index 0000000..7283fa7 --- /dev/null +++ b/testpar/t_cache_image.c @@ -0,0 +1,2161 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: John Mainzer + * 7/13/15 + * + * This file contains tests specific to the cache image + * feature implemented in H5C.c + */ +#include "h5test.h" +#include "testphdf5.h" +#include "testpar.h" +#include "cache_common.h" +#include "genall5.h" + +#define CHUNK_SIZE 10 +#define DSET_SIZE (40 * CHUNK_SIZE) +#define MAX_NUM_DSETS 256 + + +/* global variable declarations: */ + + +const char *FILENAMES[] = { + "t_cache_image_00", + "t_cache_image_01", + NULL +}; + +/* local utility function declarations */ + +static void create_data_sets(hid_t file_id, int min_dset, int max_dset); +static void delete_data_sets(hid_t file_id, int min_dset, int max_dset); + +static void open_hdf5_file(const hbool_t create_file, + const hbool_t mdci_sbem_expected, const hbool_t read_only, + const hbool_t set_mdci_fapl, const hbool_t config_fsm, + const char * hdf_file_name, const unsigned cache_image_flags, + hid_t * file_id_ptr, H5F_t ** file_ptr_ptr, H5C_t ** cache_ptr_ptr, + MPI_Comm comm, MPI_Info info, int l_facc_type, + const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write, + const int md_write_strat); + +static void verify_data_sets(hid_t file_id, int min_dset, int max_dset); + +/* local test function declarations */ + +static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr, + hbool_t display); +static void usage(void); +static unsigned construct_test_file(int test_file_index); + + +/* top level test function declarations */ +static unsigned verify_cache_image_RW(int file_name_id, + int md_write_strat, int mpi_rank); + + +/****************************************************************************/ +/***************************** Utility Functions ****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: construct_test_file() + * + * Purpose: This 
function attempts to mimic the typical "poor man's + * parallel use case in which the file is passed between + * processes, each of which open the file, write some data, + * close the file, and then pass control on to the next + * process. + * + * In this case, we create one group for each process, and + * populate it with a "zoo" of HDF5 objects selected to + * (ideally) exercise all HDF5 on disk data structures. + * + * The end result is a test file used verify that PHDF5 + * can open a file with a cache image. + * + * Cycle of operation + * + * 1) Create a HDF5 file with the cache image FAPL entry. + * + * Verify that the cache is informed of the cache image + * FAPL entry. + * + * Set all cache image flags, forcing full functionality. + * + * 2) Create a data set in the file. + * + * 3) Close the file. + * + * 4) Open the file. + * + * Verify that the metadata cache is instructed to load + * the metadata cache image. + * + * 5) Create a data set in the file. + * + * 6) Close the file. If enough datasets have been created + * goto 7. Otherwise return to 4. + * + * 7) Open the file R/O. + * + * Verify that the file contains a metadata cache image + * superblock extension message. + * + * 8) Verify all data sets. + * + * Verify that the cache image has been loaded. + * + * 9) close the file. + * + * Return: void + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +construct_test_file(int test_file_index) +{ + const char * fcn_name = "construct_test_file()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + int min_dset = 0; + int max_dset = 0; + MPI_Comm dummy_comm = MPI_COMM_WORLD; + MPI_Info dummy_info = MPI_INFO_NULL; + + pass = TRUE; + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* setup the file name */ + if ( pass ) { + + HDassert(FILENAMES[test_file_index]); + + if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, + filename, sizeof(filename)) + == NULL ) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 1) Create a HDF5 file with the cache image FAPL entry. + * + * Verify that the cache is informed of the cache image FAPL entry. + * + * Set flags forcing full function of the cache image feature. + */ + + if ( pass ) { + + open_hdf5_file(/* create_file */ TRUE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ TRUE, + /* config_fsm */ TRUE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 2) Create a data set in the file. 
*/ + + if ( pass ) { + + create_data_sets(file_id, min_dset++, max_dset++); + } + +#if H5C_COLLECT_CACHE_STATS + if ( pass ) { + + if ( cache_ptr->images_loaded != 0 ) { + + pass = FALSE; + failure_mssg = "metadata cache image block loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 3) Close the file. */ + + if ( pass ) { + + if ( H5Fclose(file_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + while ( ( pass ) && ( max_dset < MAX_NUM_DSETS ) ) + { + + /* 4) Open the file. + * + * Verify that the metadata cache is instructed to load the + * metadata cache image. + */ + + if ( pass ) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ FALSE, + /* set_mdci_fapl */ TRUE, + /* config_fsm */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if ( show_progress ) + HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n", + fcn_name, cp, max_dset, pass); + + + /* 5) Create a data set in the file. */ + + if ( pass ) { + + create_data_sets(file_id, min_dset++, max_dset++); + } + +#if H5C_COLLECT_CACHE_STATS + if ( pass ) { + + if ( cache_ptr->images_loaded == 0 ) { + + pass = FALSE; + failure_mssg = "metadata cache image block not loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( show_progress ) + HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n", + fcn_name, cp + 1, max_dset, pass); + + + /* 6) Close the file. */ + + if ( pass ) { + + if ( H5Fclose(file_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n", + fcn_name, cp + 2, max_dset, pass); + } /* end while */ + cp += 3; + + + /* 7) Open the file R/O. + * + * Verify that the file contains a metadata cache image + * superblock extension message. + */ + + if ( pass ) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ TRUE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 8) Open and close all data sets. + * + * Verify that the cache image has been loaded. + */ + + if ( pass ) { + + verify_data_sets(file_id, 0, max_dset - 1); + } + +#if H5C_COLLECT_CACHE_STATS + if ( pass ) { + + if ( cache_ptr->images_loaded == 0 ) { + + pass = FALSE; + failure_mssg = "metadata cache image block not loaded(2)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 9) Close the file. 
*/ + + if ( pass ) { + + if ( H5Fclose(file_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + return !pass; + +} /* construct_test_file() */ + + +/*------------------------------------------------------------------------- + * Function: create_data_sets() + * + * Purpose: If pass is TRUE on entry, create the specified data sets + * in the indicated file. + * + * Data sets and their contents must be well know, as we + * will verify that they contain the expected data later. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 7/15/15 + * + * Modifications: + * + * Added min_dset and max_dset parameters and supporting + * code. This allows the caller to specify a range of + * datasets to create. + * JRM -- 8/20/15 + * + *------------------------------------------------------------------------- + */ + +static void +create_data_sets(hid_t file_id, int min_dset, int max_dset) +{ + const char * fcn_name = "create_data_sets()"; + char dset_name[64]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l, m; + int data_chunk[CHUNK_SIZE][CHUNK_SIZE]; + herr_t status; + hid_t dataspace_id = -1; + hid_t filespace_ids[MAX_NUM_DSETS]; + hid_t memspace_id = -1; + hid_t dataset_ids[MAX_NUM_DSETS]; + hid_t properties = -1; + hsize_t dims[2]; + hsize_t a_size[2]; + hsize_t offset[2]; + hsize_t chunk_size[2]; + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + HDassert(0 <= min_dset); + HDassert(min_dset <= max_dset); + HDassert(max_dset < MAX_NUM_DSETS); + + /* create the datasets */ + + if ( pass ) { + + i = min_dset; + + while ( ( pass ) && ( i <= max_dset ) ) + { + /* create a dataspace for the chunked dataset */ + dims[0] = DSET_SIZE; + dims[1] = DSET_SIZE; + dataspace_id = H5Screate_simple(2, dims, NULL); + + if ( dataspace_id < 0 ) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + + /* set the dataset creation plist to specify that the raw data is + * to be partioned into 10X10 element chunks. 
+ */ + + if ( pass ) { + + chunk_size[0] = CHUNK_SIZE; + chunk_size[1] = CHUNK_SIZE; + properties = H5Pcreate(H5P_DATASET_CREATE); + + if ( properties < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pcreate() failed."; + } + } + + if ( pass ) { + + if ( H5Pset_chunk(properties, 2, chunk_size) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_chunk() failed."; + } + } + + /* create the dataset */ + if ( pass ) { + + sprintf(dset_name, "/dset%03d", i); + dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, + dataspace_id, H5P_DEFAULT, + properties, H5P_DEFAULT); + + if ( dataset_ids[i] < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dcreate() failed."; + } + } + + /* get the file space ID */ + if ( pass ) { + + filespace_ids[i] = H5Dget_space(dataset_ids[i]); + + if ( filespace_ids[i] < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + i++; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* create the mem space to be used to read and write chunks */ + if ( pass ) { + + dims[0] = CHUNK_SIZE; + dims[1] = CHUNK_SIZE; + memspace_id = H5Screate_simple(2, dims, NULL); + + if ( memspace_id < 0 ) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* select in memory hyperslab */ + if ( pass ) { + + offset[0] = 0; /*offset of hyperslab in memory*/ + offset[1] = 0; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, + a_size, NULL); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* initialize all datasets on a round robin basis */ + i = 0; + while ( ( pass ) && ( i < DSET_SIZE ) ) + { + j = 0; + while ( ( pass ) && ( j < DSET_SIZE ) ) + { + m = min_dset; + while ( ( pass ) && ( m <= max_dset ) ) + { + /* initialize the slab */ + for ( k = 0; k < CHUNK_SIZE; k++ ) + { + for ( l = 0; l < CHUNK_SIZE; l++ ) + { + data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) + + (DSET_SIZE * (i + k)) + j + l; + } + } + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /*offset of hyperslab in file*/ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, + offset, NULL, a_size, NULL); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "disk H5Sselect_hyperslab() failed."; + } + + /* write the chunk to file */ + status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id, + filespace_ids[m], H5P_DEFAULT, data_chunk); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dwrite() failed."; + } + m++; + } + j += CHUNK_SIZE; + } + + i += CHUNK_SIZE; + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* read data from data sets and validate it */ + i = 0; + while ( ( pass ) && ( i < DSET_SIZE ) ) + { + j = 0; + while ( ( pass ) && ( j < DSET_SIZE ) ) + { + m = min_dset; + while ( ( pass ) && ( m <= max_dset ) ) + { + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /* offset of hyperslab in file */ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, + offset, NULL, a_size, NULL); + + if ( status < 0 ) { + + pass = FALSE; + 
failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if ( pass ) { + + status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, + memspace_id, filespace_ids[m], + H5P_DEFAULT, data_chunk); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + } + + /* validate the slab */ + if ( pass ) { + + valid_chunk = TRUE; + for ( k = 0; k < CHUNK_SIZE; k++ ) + { + for ( l = 0; l < CHUNK_SIZE; l++ ) + { + if ( data_chunk[k][l] + != + ((DSET_SIZE * DSET_SIZE * m) + + (DSET_SIZE * (i + k)) + j + l) ) { + + valid_chunk = FALSE; + + if ( verbose ) { + + HDfprintf(stdout, + "data_chunk[%0d][%0d] = %0d, expect %0d.\n", + k, l, data_chunk[k][l], + ((DSET_SIZE * DSET_SIZE * m) + + (DSET_SIZE * (i + k)) + j + l)); + HDfprintf(stdout, + "m = %d, i = %d, j = %d, k = %d, l = %d\n", + m, i, j, k, l); + } + } + } + } + + if ( ! valid_chunk ) { + + pass = FALSE; + failure_mssg = "slab validation failed."; + + if ( verbose ) { + + fprintf(stdout, + "Chunk (%0d, %0d) in /dset%03d is invalid.\n", + i, j, m); + } + } + } + m++; + } + j += CHUNK_SIZE; + } + i += CHUNK_SIZE; + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* close the file spaces */ + i = min_dset; + while ( ( pass ) && ( i <= max_dset ) ) + { + if ( H5Sclose(filespace_ids[i]) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sclose() failed."; + } + i++; + } + + + /* close the datasets */ + i = min_dset; + while ( ( pass ) && ( i <= max_dset ) ) + { + if ( H5Dclose(dataset_ids[i]) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dclose() failed."; + } + i++; + } + + /* close the mem space */ + if ( pass ) { + + if ( H5Sclose(memspace_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sclose(memspace_id) failed."; + } + } + + return; + +} /* create_data_sets() */ + + +/*------------------------------------------------------------------------- + * Function: delete_data_sets() + * + * Purpose: If pass is TRUE on entry, verify and then delete the + * dataset(s) indicated by min_dset and max_dset in the + * indicated file. + * + * Data sets and their contents must be well know, as we + * will verify that they contain the expected data later. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 10/31/16 + * + * Modifications: + * + * None. 
+ * JRM -- 8/20/15 + * + *------------------------------------------------------------------------- + */ + +static void +delete_data_sets(hid_t file_id, int min_dset, int max_dset) +{ + const char * fcn_name = "delete_data_sets()"; + char dset_name[64]; + hbool_t show_progress = FALSE; + int cp = 0; + int i; + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + HDassert(0 <= min_dset); + HDassert(min_dset <= max_dset); + HDassert(max_dset < MAX_NUM_DSETS); + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* first, verify the contents of the target dataset(s) */ + verify_data_sets(file_id, min_dset, max_dset); + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* now delete the target datasets */ + if ( pass ) { + + i = min_dset; + + while ( ( pass ) && ( i <= max_dset ) ) + { + sprintf(dset_name, "/dset%03d", i); + + if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) { + + pass = FALSE; + failure_mssg = "H5Ldelete() failed."; + } + + i++; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + return; + +} /* delete_data_sets() */ + + +/*------------------------------------------------------------------------- + * Function: open_hdf5_file() + * + * Purpose: If pass is true on entry, create or open the specified HDF5 + * and test to see if it has a metadata cache image superblock + * extension message. + * + * Set pass to FALSE and issue a suitable failure + * message if either the file contains a metadata cache image + * superblock extension and mdci_sbem_expected is TRUE, or + * vise versa. + * + * If mdci_sbem_expected is TRUE, also verify that the metadata + * cache has been advised of this. + * + * If read_only is TRUE, open the file read only. Otherwise + * open the file read/write. + * + * If set_mdci_fapl is TRUE, set the metadata cache image + * FAPL entry when opening the file, and verify that the + * metadata cache is notified. + * + * If config_fsm is TRUE, setup the persistant free space + * manager. Note that this flag may only be set if + * create_file is also TRUE. + * + * Return pointers to the cache data structure and file data + * structures. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 7/14/15 + * + * Modifications: + * + * Modified function to handle parallel file creates / opens. 
+ * + * JRM -- 2/1/17 + * + *------------------------------------------------------------------------- + */ + +static void +open_hdf5_file(const hbool_t create_file, + const hbool_t mdci_sbem_expected, + const hbool_t read_only, + const hbool_t set_mdci_fapl, + const hbool_t config_fsm, + const char * hdf_file_name, + const unsigned cache_image_flags, + hid_t * file_id_ptr, + H5F_t ** file_ptr_ptr, + H5C_t ** cache_ptr_ptr, + MPI_Comm comm, + MPI_Info info, + int l_facc_type, + const hbool_t all_coll_metadata_ops, + const hbool_t coll_metadata_write, + const int md_write_strat) +{ + const char * fcn_name = "open_hdf5_file()"; + hbool_t show_progress = FALSE; + hbool_t verbose = FALSE; + int cp = 0; + hid_t fapl_id = -1; + hid_t fcpl_id = -1; + hid_t file_id = -1; + herr_t result; + H5F_t * file_ptr = NULL; + H5C_t * cache_ptr = NULL; + H5C_cache_image_ctl_t image_ctl; + H5AC_cache_image_config_t cache_image_config = { + H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, + TRUE, + FALSE, + H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE}; + + if ( pass ) + { + /* opening the file both read only and with a cache image + * requested is a contradiction. We resolve it by ignoring + * the cache image request silently. + */ + if ( ( create_file && mdci_sbem_expected ) || + ( create_file && read_only ) || + ( config_fsm && !create_file ) || + ( hdf_file_name == NULL ) || + ( ( set_mdci_fapl ) && ( cache_image_flags == 0 ) ) || + ( ( set_mdci_fapl ) && + ( (cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0 ) ) || + ( file_id_ptr == NULL ) || + ( file_ptr_ptr == NULL ) || + ( cache_ptr_ptr == NULL ) || + ( l_facc_type != (l_facc_type & (FACC_MPIO)) ) ) { + + failure_mssg = + "Bad param(s) on entry to open_hdf5_file().\n"; + + pass = FALSE; + } else if ( verbose ) { + + HDfprintf(stdout, "%s: HDF file name = \"%s\".\n", + fcn_name, hdf_file_name); + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* create a file access propertly list. 
*/ + if ( pass ) { + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + + if ( fapl_id < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pcreate() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* call H5Pset_libver_bounds() on the fapl_id */ + if ( pass ) { + + if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) + < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_libver_bounds() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* get metadata cache image config -- verify that it is the default */ + if ( pass ) { + + result = H5Pget_mdc_image_config(fapl_id, &cache_image_config); + + if ( result < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pget_mdc_image_config() failed.\n"; + } + + if ( ( cache_image_config.version != + H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) || + ( cache_image_config.generate_image != FALSE ) || + ( cache_image_config.save_resize_status != FALSE ) || + ( cache_image_config.entry_ageout != + H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected default cache image config.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* set metadata cache image fapl entry if indicated */ + if ( ( pass ) && ( set_mdci_fapl ) ) { + + /* set cache image config fields to taste */ + cache_image_config.generate_image = TRUE; + cache_image_config.save_resize_status = FALSE; + cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE; + + result = H5Pset_mdc_image_config(fapl_id, &cache_image_config); + + if ( result < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_mdc_image_config() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the persistant free space manager if indicated */ + if ( ( pass ) && ( config_fsm ) ) { + + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + + if ( fcpl_id <= 0 ) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed."; + } + } + + if ( ( pass ) && ( config_fsm ) ) { + + if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, + TRUE, (hsize_t)1) == FAIL ) { + pass = FALSE; + failure_mssg = "H5Pset_file_space_strategy() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) { + + /* set Parallel access with communicator */ + if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_fapl_mpio() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) { + + if (H5Pset_all_coll_metadata_ops(fapl_id, all_coll_metadata_ops) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_all_coll_metadata_ops() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) { + + if ( H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_coll_metadata_write() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) { + + /* set the desired parallel metadata write strategy */ + H5AC_cache_config_t 
mdc_config; + + mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER; + + if ( H5Pget_mdc_config(fapl_id, &mdc_config) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pget_mdc_config() failed.\n"; + } + + mdc_config.metadata_write_strategy = md_write_strat; + + if ( H5Pset_mdc_config(fapl_id, &mdc_config) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Pset_mdc_config() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* open the file */ + if ( pass ) { + + if ( create_file ) { + + if ( fcpl_id != -1 ) + + file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, + fcpl_id, fapl_id); + else + + file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, + H5P_DEFAULT, fapl_id); + + } else { + + if ( read_only ) + + file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id); + + else + + file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR, fapl_id); + } + + if ( file_id < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fcreate() or H5Fopen() failed.\n"; + + } else { + + file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE); + + if ( file_ptr == NULL ) { + + pass = FALSE; + failure_mssg = "Can't get file_ptr."; + + if ( verbose ) { + HDfprintf(stdout, "%s: Can't get file_ptr.\n", fcn_name); + } + } + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* get a pointer to the files internal data structure and then + * to the cache structure + */ + if ( pass ) { + + if ( file_ptr->shared->cache == NULL ) { + + pass = FALSE; + failure_mssg = "can't get cache pointer(1).\n"; + + } else { + + cache_ptr = file_ptr->shared->cache; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* verify expected metadata cache status */ + + /* get the cache image control structure from the cache, and verify + * that it contains the expected values. + * + * Then set the flags in this structure to the specified value. 
+ */ + if ( pass ) { + + if ( H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0 ) { + + pass = FALSE; + failure_mssg = "error returned by H5C_get_cache_image_config()."; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( pass ) { + + if ( set_mdci_fapl ) { + + if ( read_only ) { + + if ( ( image_ctl.version != + H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) || + ( image_ctl.generate_image != FALSE ) || + ( image_ctl.save_resize_status != FALSE ) || + ( image_ctl.entry_ageout != + H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) || + ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(1).\n"; + } + } else { + + if ( ( image_ctl.version != + H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) || + ( image_ctl.generate_image != TRUE ) || + ( image_ctl.save_resize_status != FALSE ) || + ( image_ctl.entry_ageout != + H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) || + ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(2).\n"; + } + } + } else { + + if ( ( image_ctl.version != + H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) || + ( image_ctl.generate_image != FALSE ) || + ( image_ctl.save_resize_status != FALSE ) || + ( image_ctl.entry_ageout != + H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) || + ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(3).\n"; + } + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( ( pass ) && ( set_mdci_fapl ) ) { + + image_ctl.flags = cache_image_flags; + + if ( H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0 ) { + + pass = FALSE; + failure_mssg = "error returned by H5C_set_cache_image_config()."; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( pass ) { + + if ( cache_ptr->close_warning_received == TRUE ) { + + pass = FALSE; + failure_mssg = "Unexpected value of close_warning_received.\n"; + } + + if ( mdci_sbem_expected ) { + + if ( read_only ) { + + if ( ( cache_ptr->load_image != TRUE ) || + ( cache_ptr->delete_image != FALSE ) ) { + + pass = FALSE; + failure_mssg = "mdci sb extension message not present?\n"; + } + } else { + + if ( ( cache_ptr->load_image != TRUE ) || + ( cache_ptr->delete_image != TRUE ) ) { + + pass = FALSE; + failure_mssg = "mdci sb extension message not present?\n"; + } + } + } else { + + if ( ( cache_ptr->load_image == TRUE ) || + ( cache_ptr->delete_image == TRUE ) ) { + + pass = FALSE; + failure_mssg = "mdci sb extension message present?\n"; + } + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ( pass ) { + + *file_id_ptr = file_id; + *file_ptr_ptr = file_ptr; + *cache_ptr_ptr = cache_ptr; + } + + if ( show_progress ) { + HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n", + fcn_name, cp++, pass); + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = %s\n", + fcn_name, failure_mssg); + } + + return; + +} /* open_hdf5_file() */ + + +/*------------------------------------------------------------------------- + * Function: parse_flags + * + * Purpose: Parse the flags passed to this program, and load the + * values into the supplied field. 
+ * + * Return: Success: 1 + * Failure: 0 + * + * Programmer: J Mainzer + * 4/28/11 + * + *------------------------------------------------------------------------- + */ +hbool_t +parse_flags(int argc, char * argv[], hbool_t * setup_ptr, hbool_t display) +{ + const char * fcn_name = "parse_flags()"; + const char * (ops[]) = {"setup"}; + int success = TRUE; + + if ( setup_ptr == NULL ) { + + success = FALSE; + HDfprintf(stdout, "%s: bad arg(s) on entry.\n", fcn_name); + } + + if ( ( success ) && + ( ( argc < 1 ) || ( argc > 2 ) ) ) { + + success = FALSE; + usage(); + } + + if ( success ) { + + if ( argc == 2 ) { + + if ( strcmp(argv[1], ops[0]) == 0 ) { + + *setup_ptr = TRUE; + + } else { + + success = FALSE; + usage(); + } + } else { + + *setup_ptr = FALSE; + } + } + + if ( ( success ) && ( display ) ) { + + if ( *setup_ptr ) + HDfprintf(stdout, "t_cache_image setup\n"); + else + HDfprintf(stdout, "t_cache_image\n"); + } + + return(success); + +} /* parse_flags() */ + + +/*------------------------------------------------------------------------- + * Function: usage + * + * Purpose: Display a brief message describing the purpose and use + * of the program. + * + * Return: void + * + * Programmer: John Mainzer + * 4/28/11 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +usage(void) +{ + const char * s[] = + { + "\n", + "t_cache_image:\n", + "\n", + "Run the parallel cache image tests. \n" + "\n" + "In general, this program is run via MPI. However, at present, files\n" + "with cache images can only be constructed by serial processes.\n", + "\n", + "To square this circle, one process in the parallel computation \n" + "forks a serial version of the test program to handle this detail.\n", + "The \"setup\" parameter indicates that t_cache_image is being \n", + "invokde for this purpose.\n", + "\n", + "usage: t_cache_image [setup]\n", + "\n", + "where:\n", + "\n", + " setup parameter forces creation of test file\n", + "\n", + "Returns 0 on success, 1 on failure.\n", + "\n", + NULL, + }; + int i = 0; + + while(s[i] != NULL) { + fprintf(stdout, "%s", s[i]); + i++; + } + + return; +} /* usage() */ + + +/*------------------------------------------------------------------------- + * Function: verify_data_sets() + * + * Purpose: If pass is TRUE on entry, verify that the data sets in the + * file exist and contain the expected data. + * + * Note that these data sets were created by + * create_data_sets() above. Thus any changes in that + * function must be reflected in this function, and + * vise-versa. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 7/15/15 + * + * Modifications: + * + * Added min_dset and max_dset parameters and supporting + * code. This allows the caller to specify a range of + * datasets to verify. 
+ * JRM -- 8/20/15 + * + *------------------------------------------------------------------------- + */ + +static void +verify_data_sets(hid_t file_id, int min_dset, int max_dset) +{ + const char * fcn_name = "verify_data_sets()"; + char dset_name[64]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l, m; + int data_chunk[CHUNK_SIZE][CHUNK_SIZE]; + herr_t status; + hid_t filespace_ids[MAX_NUM_DSETS]; + hid_t memspace_id = -1; + hid_t dataset_ids[MAX_NUM_DSETS]; + hsize_t dims[2]; + hsize_t a_size[2]; + hsize_t offset[2]; + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + HDassert(0 <= min_dset); + HDassert(min_dset <= max_dset); + HDassert(max_dset < MAX_NUM_DSETS); + + /* open the datasets */ + + if ( pass ) { + + i = min_dset; + + while ( ( pass ) && ( i <= max_dset ) ) + { + /* open the dataset */ + if ( pass ) { + + sprintf(dset_name, "/dset%03d", i); + dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT); + + if ( dataset_ids[i] < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dopen2() failed."; + } + } + + /* get the file space ID */ + if ( pass ) { + + filespace_ids[i] = H5Dget_space(dataset_ids[i]); + + if ( filespace_ids[i] < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + i++; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* create the mem space to be used to read and write chunks */ + if ( pass ) { + + dims[0] = CHUNK_SIZE; + dims[1] = CHUNK_SIZE; + memspace_id = H5Screate_simple(2, dims, NULL); + + if ( memspace_id < 0 ) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* select in memory hyperslab */ + if ( pass ) { + + offset[0] = 0; /*offset of hyperslab in memory*/ + offset[1] = 0; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, + a_size, NULL); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + + /* read data from data sets and validate it */ + i = 0; + while ( ( pass ) && ( i < DSET_SIZE ) ) + { + j = 0; + while ( ( pass ) && ( j < DSET_SIZE ) ) + { + m = min_dset; + while ( ( pass ) && ( m <= max_dset ) ) + { + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /* offset of hyperslab in file */ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, + offset, NULL, a_size, NULL); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if ( pass ) { + + status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, + memspace_id, filespace_ids[m], + H5P_DEFAULT, data_chunk); + + if ( status < 0 ) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + } + + /* validate the slab */ + if ( pass ) { + + valid_chunk = TRUE; + for ( k = 0; k < CHUNK_SIZE; k++ ) + { + for ( l = 0; l < CHUNK_SIZE; l++ ) + { + if ( data_chunk[k][l] + != + ((DSET_SIZE * DSET_SIZE * m) + + (DSET_SIZE * (i + k)) + j + l) ) { + + valid_chunk = FALSE; + + if ( verbose ) { + + HDfprintf(stdout, + "data_chunk[%0d][%0d] = %0d, expect %0d.\n", + k, l, data_chunk[k][l], + ((DSET_SIZE * DSET_SIZE * m) + + 
(DSET_SIZE * (i + k)) + j + l)); + HDfprintf(stdout, + "m = %d, i = %d, j = %d, k = %d, l = %d\n", + m, i, j, k, l); + } + } + } + } + + if ( ! valid_chunk ) { + + pass = FALSE; + failure_mssg = "slab validation failed."; + + if ( verbose ) { + + fprintf(stdout, + "Chunk (%0d, %0d) in /dset%03d is invalid.\n", + i, j, m); + } + } + } + m++; + } + j += CHUNK_SIZE; + } + i += CHUNK_SIZE; + } + + if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* close the file spaces */ + i = min_dset; + while ( ( pass ) && ( i <= max_dset ) ) + { + if ( H5Sclose(filespace_ids[i]) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sclose() failed."; + } + i++; + } + + + /* close the datasets */ + i = min_dset; + while ( ( pass ) && ( i <= max_dset ) ) + { + if ( H5Dclose(dataset_ids[i]) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Dclose() failed."; + } + i++; + } + + /* close the mem space */ + if ( pass ) { + + if ( H5Sclose(memspace_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Sclose(memspace_id) failed."; + } + } + + return; + +} /* verify_data_sets() */ + + +/****************************************************************************/ +/******************************* Test Functions *****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: verify_cache_image_RW() + * + * Purpose: Verify that a HDF5 file containing a cache image is + * opened and read correctly by PHDF5 with the specified + * metadata write strategy. + * + * Basic cycle of operation is as follows: + * + * 1) Open the test file created at the beginning of this + * test. + * + * Verify that the file contains a cache image. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + * + * 2) Verify that the file contains the expected data. + * + * 3) Close the file. + * + * 4) Open the file, and verify that it doesn't contain + * a cache image. + * + * 5) Verify that the file contains the expected data. + * + * 6) Close the file. + * + * 7) Delete the file. + * + * Return: void + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank) +{ + const char * fcn_name = "verify_cache_imageRW()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + int i; + + pass = TRUE; + + if ( mpi_rank == 0 ) { + + switch(md_write_strat) { + + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + TESTING("parallel CI load test -- proc0 md write -- R/W"); + break; + + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + TESTING("parallel CI load test -- dist md write -- R/W"); + break; + + default: + TESTING("parallel CI load test -- unknown md write -- R/W"); + pass = FALSE; + break; + } + } + + show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) ); + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* setup the file name */ + if ( pass ) { + + if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, + filename, sizeof(filename)) == NULL ) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 1) Open the test file created at the beginning of this test. + * + * Verify that the file contains a cache image. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + */ + + if ( pass ) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ FALSE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ MPI_COMM_WORLD, + /* info */ MPI_INFO_NULL, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ md_write_strat); + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Verify that only process 0 reads the cache image. */ + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Verify that all other processes receive the cache image block + * from process 0. + */ + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 2) Verify that the file contains the expected data. */ + if ( pass ) { + + verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1); + } + +#if H5C_COLLECT_CACHE_STATS + if ( pass ) { + + if ( cache_ptr->images_loaded == 0 ) { + + pass = FALSE; + failure_mssg = "metadata cache image block not loaded(2)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + + /* 3) Close the file. */ + + if ( pass ) { + + if ( H5Fclose(file_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 4) Open the file, and verify that it doesn't contain a cache image. 
*/ + + if ( pass ) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ MPI_COMM_WORLD, + /* info */ MPI_INFO_NULL, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ md_write_strat); + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 5) Verify that the file contains the expected data. */ + + if ( pass ) { + + verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1); + } + +#if H5C_COLLECT_CACHE_STATS + if ( pass ) { + + if ( cache_ptr->images_loaded != 0 ) { + + pass = FALSE; + failure_mssg = "metadata cache image block loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + + /* 6) Close the file. */ + + if ( pass ) { + + if ( H5Fclose(file_id) < 0 ) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + + } + } + + if ( show_progress ) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + + /* 7) Delete the file. */ + + if ( pass ) { + + /* wait for everyone to close the file */ + MPI_Barrier(MPI_COMM_WORLD); + + if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) { + + pass = FALSE; + failure_mssg = "HDremove() failed.\n"; + } + } + + + /* report results */ + if ( mpi_rank == 0 ) { + + if ( pass ) { + + PASSED(); + + } else { + + H5_FAILED(); + + if ( show_progress ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg); + } + } + + + return !pass; + +} /* verify_cache_imageRW() */ + + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Run parallel tests on the cache image feature. + * + * At present, cache image is disabled in parallel, and + * thus these tests are restructed to verifying that a + * file with a cache image can be opened in the parallel + * case, and verifying that instructions to create a + * cache image are ignored in the parallel case. + * + * WARNING: This test uses fork() and execve(), and + * therefore will not run on Windows. + * + * Return: Success: 0 + * + * Failure: 1 + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +int +main(int argc, char **argv) +{ + hbool_t setup = FALSE; + unsigned nerrs = 0; + int i; + int mpi_size; + int mpi_rank; + + if ( ! parse_flags(argc, argv, &setup, FALSE) ) + exit(1); /* exit now if unable to parse flags */ + + if ( setup ) { /* construct test file and exit */ + + H5open(); + HDfprintf(stdout, "Constructing test files: \n"); + HDfflush(stdout); + + i = 0; + while ( FILENAMES[i] != NULL ) { + + HDfprintf(stdout, " writing %s ... 
", FILENAMES[i]); + HDfflush(stdout); + construct_test_file(i); + + if ( pass ) { + + printf("done.\n"); + HDfflush(stdout); + + } else { + + printf("failed.\n"); + exit(1); + } + i++; + } + HDfprintf(stdout, "Test file construction complete.\n"); + exit(0); + } + + HDassert(!setup); + + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Attempt to turn off atexit post processing so that in case errors + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ + if (H5dont_atexit() < 0){ + printf("%d:Failed to turn off atexit processing. Continue.\n", + mpi_rank); + }; + + H5open(); + + if ( mpi_rank == 0 ) { + printf("===================================\n"); + printf("Parallel metadata cache image tests\n"); + printf(" mpi_size = %d\n", mpi_size); + printf("===================================\n"); + } + + if ( mpi_size < 2 ) { + + if ( mpi_rank == 0 ) { + + printf(" Need at least 2 processes. Exiting.\n"); + } + goto finish; + } + + if ( mpi_rank == 0 ) { /* create test files */ + + int child_status; + pid_t child_pid; + + child_pid = fork(); + + if ( child_pid == 0 ) { /* this is the child process */ + + /* fun and games to shutup the compiler */ + char param0[32] = "t_cache_image"; + char param1[32] = "setup"; + char * child_argv[] = {param0, param1, NULL}; + + /* we may need to play with the path here */ + if ( execv("t_cache_image", child_argv) == -1 ) { + + HDfprintf(stdout, + "execl() of setup process failed. errno = %d(%s)\n", + errno, strerror(errno)); + exit(1); + } + + } else if ( child_pid != -1 ) { + /* this is the parent process -- wait until child is done */ + if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) { + + HDfprintf(stdout, "can't wait on setup process.\n"); + + } else if ( ! WIFEXITED(child_status) ) { + + HDfprintf(stdout, "setup process hasn't exitied.\n"); + + } else if ( WEXITSTATUS(child_status) != 0 ) { + + HDfprintf(stdout, "setup process reports failure.\n"); + + } else { + + HDfprintf(stdout, + "testfile construction complete -- proceeding with tests.\n"); + } + } else { /* fork failed */ + + HDfprintf(stdout, "can't create process to construct test file.\n"); + } + } + + /* can't start test until test file exists */ + MPI_Barrier(MPI_COMM_WORLD); + + nerrs += verify_cache_image_RW(0, + H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank); + nerrs += verify_cache_image_RW(1, + H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank); + +finish: + + /* make sure all processes are finished before final report, cleanup + * and exit. 
+ */ + MPI_Barrier(MPI_COMM_WORLD); + + if ( mpi_rank == 0 ) { /* only process 0 reports */ + sleep(10); + printf("===================================\n"); + if ( nerrs > 0 ) { + printf("***metadata cache image tests detected %d failures***\n", + nerrs); + } + else { + printf("metadata cache image tests finished with no failures\n"); + } + printf("===================================\n"); + } + + /* takedown_derived_types(); */ + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrs) because exit code is limited to 1byte */ + return(nerrs > 0); + +} /* main() */ + diff --git a/testpar/t_file.c b/testpar/t_file.c index b2f1d5e..4a923d4 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -19,6 +19,30 @@ #include "testphdf5.h" +#include "H5PBprivate.h" +#include "H5Iprivate.h" + +/* + * This file needs to access private information from the H5F package. + */ +#define H5C_FRIEND /*suppress error about including H5Cpkg */ +#include "H5Cpkg.h" +#define H5AC_FRIEND /*suppress error about including H5ACpkg */ +#include "H5ACpkg.h" +#define H5MF_FRIEND /*suppress error about including H5MFpkg */ +#include "H5MFpkg.h" +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" + +#define NUM_DSETS 5 + +int mpi_size, mpi_rank; + +static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy); +static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, + hsize_t page_size, size_t page_buffer_size); + /* * test file access by communicator besides COMM_WORLD. * Split COMM_WORLD into two, one (even_comm) contains the original @@ -33,7 +57,6 @@ void test_split_comm_access(void) { - int mpi_size, mpi_rank; MPI_Comm comm; MPI_Info info = MPI_INFO_NULL; int is_old, mrc; @@ -95,6 +118,595 @@ test_split_comm_access(void) } void +test_page_buffer_access(void) +{ + hid_t file_id = -1; /* File ID */ + hid_t fcpl, fapl, fapl_self; + hid_t dxpl_id = H5P_DATASET_XFER_DEFAULT; + size_t page_count = 0; + int i, num_elements = 200; + haddr_t raw_addr, meta_addr; + int *data; + H5F_t *f = NULL; + herr_t ret; /* generic return value */ + const char *filename; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = (const char *)GetTestParameters(); + + if (VERBOSE_MED) + printf("Page Buffer Usage in Parallel %s\n", filename); + + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + fcpl = H5Pcreate(H5P_FILE_CREATE); + VRFY((fcpl >= 0), ""); + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); + VRFY((ret == 0), ""); + ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100); + VRFY((ret == 0), ""); + ret = H5Pset_page_buffer_size(fapl, sizeof(int)*100000, 0, 0); + VRFY((ret == 0), ""); + + /* This should fail because collective metadata writes are not supported with page buffering */ + H5E_BEGIN_TRY { + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + } H5E_END_TRY; + VRFY((file_id < 0), "H5Fcreate failed"); + + /* disable collective metadata writes for page buffering to work */ + ret = H5Pset_coll_metadata_write(fapl, FALSE); + VRFY((ret >= 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); + VRFY((ret == 0), ""); + ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, 
sizeof(int)*100, sizeof(int)*100000); + VRFY((ret == 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); + VRFY((ret == 0), ""); + ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int)*100, sizeof(int)*100000); + VRFY((ret == 0), ""); + + ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100); + VRFY((ret == 0), ""); + + data = (int *) HDmalloc(sizeof(int)*(size_t)num_elements); + + /* intialize all the elements to have a value of -1 */ + for(i=0 ; i<num_elements ; i++) + data[i] = -1; + + /* MSC - why this stopped working ? */ +#if 0 + if(MAINPROCESS) { + hid_t fapl_self; + + fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type); + + ret = H5Pset_page_buffer_size(fapl_self, sizeof(int)*1000, 0, 0); + VRFY((ret == 0), ""); + /* collective metadata writes do not work with page buffering */ + ret = H5Pset_coll_metadata_write(fapl_self, FALSE); + VRFY((ret >= 0), ""); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self); + VRFY((file_id >= 0), ""); + + /* Get a pointer to the internal file object */ + f = (H5F_t *)H5I_object(file_id); + + VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); + + /* allocate space for 200 raw elements */ + raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements); + VRFY((raw_addr != HADDR_UNDEF), ""); + + /* allocate space for 200 metadata elements */ + meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements); + VRFY((meta_addr != HADDR_UNDEF), ""); + + page_count = 0; + + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data); + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data); + VRFY((ret == 0), ""); + + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the first 50 elements */ + for(i=0 ; i<50 ; i++) + data[i] = i; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data); + VRFY((ret == 0), ""); + page_count += 2; + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the second 50 elements */ + for(i=0 ; i<50 ; i++) + data[i] = i+50; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_rawdata_dxpl_id, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_ind_read_dxpl_id, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update 100 - 200 */ + for(i=0 ; i<100 ; i++) + data[i] = i+100; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_ind_read_dxpl_id, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + ret = 
+static int
+create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
+{
+    hid_t file_id, dset_id, grp_id;
+    hid_t sid, mem_dataspace;
+    hsize_t start[RANK];
+    hsize_t count[RANK];
+    hsize_t stride[RANK];
+    hsize_t block[RANK];
+    DATATYPE *data_array = NULL;
+    hsize_t dims[RANK], i;
+    hsize_t num_elements;
+    int k;
+    char dset_name[10];
+    H5F_t *f = NULL;
+    H5C_t *cache_ptr = NULL;
+    H5AC_cache_config_t config;
+    herr_t ret;
+
+    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+    VRFY((file_id >= 0), "");
+
+    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+    VRFY((ret == 0), "");
+
+    f = (H5F_t *)H5I_object(file_id);
+    VRFY((f != NULL), "");
+
+    cache_ptr = f->shared->cache;
+    VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+    cache_ptr->ignore_tags = TRUE;
+    H5C_stats__reset(cache_ptr);
+    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+
+    ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
+    VRFY((ret == 0), "");
+
+    config.metadata_write_strategy = metadata_write_strategy;
+
+    ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
+    VRFY((ret == 0), "");
+
+    grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY((grp_id >= 0), "");
+
+    dims[0] = ROW_FACTOR*mpi_size;
+    dims[1] = COL_FACTOR*mpi_size;
+    sid = H5Screate_simple (RANK, dims, NULL);
+    VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+    /* Each process takes a slab of rows. */
+    block[0] = dims[0]/mpi_size;
+    block[1] = dims[1];
+    stride[0] = block[0];
+    stride[1] = block[1];
+    count[0] = 1;
+    count[1] = 1;
+    start[0] = mpi_rank*block[0];
+    start[1] = 0;
+
+    num_elements = block[0] * block[1];
+    /* allocate memory for data buffer */
+    data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+    VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+    /* put some trivial data in the data_array */
+    for(i=0 ; i<num_elements; i++)
+        data_array[i] = mpi_rank + 1;
+
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+    VRFY((mem_dataspace >= 0), "");
+
+    for(k=0 ; k<NUM_DSETS; k++) {
+        sprintf(dset_name, "D1dset%d", k);
+        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+        ret = H5Dclose(dset_id);
+        VRFY((ret == 0), "");
+
+        sprintf(dset_name, "D2dset%d", k);
+        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+        ret = H5Dclose(dset_id);
+        VRFY((ret == 0), "");
+
+        sprintf(dset_name, "D3dset%d", k);
+        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+        ret = H5Dclose(dset_id);
+        VRFY((ret == 0), "");
+
+        sprintf(dset_name, "dset%d", k);
+        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+
+        ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+        VRFY((ret == 0), "");
+
+        ret = H5Dclose(dset_id);
+        VRFY((ret == 0), "");
+
+        HDmemset(data_array, 0, num_elements*sizeof(DATATYPE));
+        dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+
+        ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+        VRFY((ret == 0), "");
+
+        ret = H5Dclose(dset_id);
+        VRFY((ret == 0), "");
+
+        for (i=0; i < num_elements; i++)
+            VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+
+        sprintf(dset_name, "D1dset%d", k);
+        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+        VRFY((ret == 0), "");
+        sprintf(dset_name, "D2dset%d", k);
+        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+        VRFY((ret == 0), "");
+        sprintf(dset_name, "D3dset%d", k);
+        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+        VRFY((ret == 0), "");
+    }
+
+    ret = H5Gclose(grp_id);
+    VRFY((ret == 0), "");
+    ret = H5Fclose(file_id);
+    VRFY((ret == 0), "");
+    ret = H5Sclose(sid);
+    VRFY((ret == 0), "");
+    ret = H5Sclose(mem_dataspace);
+    VRFY((ret == 0), "");
+
+    MPI_Barrier(MPI_COMM_WORLD);
+    HDfree(data_array);
+    return 0;
+} /* create_file */
+
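The hyperslab arithmetic in create_file() gives each rank one contiguous band of rows; a small worked sketch of the same selection (4 ranks and a hypothetical 8 x 8 dataset, values not from the patch):

    /* Sketch: per-rank row-band selection as used by create_file()/open_file().
     * With dims = {8, 8} and mpi_size = 4, rank r selects rows 2*r and 2*r+1.  */
    hsize_t dims[2]   = {8, 8};
    hsize_t block[2]  = {dims[0] / (hsize_t)mpi_size, dims[1]};   /* 2 x 8 band */
    hsize_t start[2]  = {(hsize_t)mpi_rank * block[0], 0};
    hsize_t stride[2] = {block[0], block[1]};
    hsize_t count[2]  = {1, 1};

    hid_t fspace = H5Screate_simple(2, dims, NULL);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);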
+static int
+open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
+          hsize_t page_size, size_t page_buffer_size)
+{
+    hid_t file_id, dset_id, grp_id, grp_id2;
+    hid_t sid, mem_dataspace;
+    DATATYPE *data_array = NULL;
+    hsize_t dims[RANK];
+    hsize_t start[RANK];
+    hsize_t count[RANK];
+    hsize_t stride[RANK];
+    hsize_t block[RANK];
+    int i, k, ndims;
+    hsize_t num_elements;
+    char dset_name[10];
+    H5F_t *f = NULL;
+    H5C_t *cache_ptr = NULL;
+    H5AC_cache_config_t config;
+    herr_t ret;
+
+    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+    ret = H5Pget_mdc_config(fapl, &config);
+    VRFY((ret == 0), "");
+
+    config.metadata_write_strategy = metadata_write_strategy;
+
+    ret = H5Pset_mdc_config(fapl, &config);
+    VRFY((ret == 0), "");
+
+    file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+    H5Eprint2(H5E_DEFAULT, stderr);
+    VRFY((file_id >= 0), "");
+
+    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+    VRFY((ret == 0), "");
+
+    f = (H5F_t *)H5I_object(file_id);
+    VRFY((f != NULL), "");
+
+    cache_ptr = f->shared->cache;
+    VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    VRFY((f->shared->page_buf != NULL), "");
+    VRFY((f->shared->page_buf->page_size == page_size), "");
+    VRFY((f->shared->page_buf->max_size == page_buffer_size), "");
+
+    grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
+    VRFY((grp_id >= 0), "");
+
+    dims[0] = ROW_FACTOR*mpi_size;
+    dims[1] = COL_FACTOR*mpi_size;
+
+    /* Each process takes a slab of rows. */
+    block[0] = dims[0]/mpi_size;
+    block[1] = dims[1];
+    stride[0] = block[0];
+    stride[1] = block[1];
+    count[0] = 1;
+    count[1] = 1;
+    start[0] = mpi_rank*block[0];
+    start[1] = 0;
+
+    num_elements = block[0] * block[1];
+    /* allocate memory for data buffer */
+    data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+    VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+    VRFY((mem_dataspace >= 0), "");
+
+    for(k=0 ; k<NUM_DSETS; k++) {
+        sprintf(dset_name, "dset%d", k);
+        dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "");
+
+        sid = H5Dget_space(dset_id);
+        VRFY((sid >= 0), "H5Dget_space succeeded");
+
+        ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
+        VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
+        VRFY((dims[0] == ROW_FACTOR*mpi_size), "Wrong dataset dimensions");
+        VRFY((dims[1] == COL_FACTOR*mpi_size), "Wrong dataset dimensions");
+
+        ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+        ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+        VRFY((ret >= 0), "");
+
+        ret = H5Dclose(dset_id);
+        VRFY((ret >= 0), "");
+        ret = H5Sclose(sid);
+        VRFY((ret == 0), "");
+
+        for (i=0; i < num_elements; i++)
+            VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+    }
+
+    grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY((grp_id2 >= 0), "");
+    ret = H5Gclose(grp_id2);
+    VRFY((ret == 0), "");
+
+    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+    VRFY((ret == 0), "");
+
+    MPI_Barrier(MPI_COMM_WORLD);
+    /* expunge all clean, unpinned, and unprotected entries from the
+     * metadata cache, so the group open below must read its metadata
+     * back from the file (and hence through the page buffer).
+     */
+    for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) {
+        H5C_cache_entry_t * entry_ptr = NULL;
+
+        entry_ptr = cache_ptr->index[i];
+
+        while ( entry_ptr != NULL ) {
+            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+            HDassert(entry_ptr->is_dirty == FALSE);
+
+            if(!entry_ptr->is_pinned && !entry_ptr->is_protected) {
+                ret = H5AC_expunge_entry(f, H5AC_ind_read_dxpl_id, entry_ptr->type, entry_ptr->addr, 0);
+                VRFY((ret == 0), "");
+            }
+
+            entry_ptr = entry_ptr->ht_next;
+        }
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
+    H5Eprint2(H5E_DEFAULT, stderr);
+    VRFY((grp_id2 >= 0), "");
+    ret = H5Gclose(grp_id2);
+    H5Eprint2(H5E_DEFAULT, stderr);
+    VRFY((ret == 0), "");
+
+    ret = H5Gclose(grp_id);
+    VRFY((ret == 0), "");
+    ret = H5Fclose(file_id);
+    VRFY((ret == 0), "");
+    ret = H5Sclose(mem_dataspace);
+    VRFY((ret == 0), "");
+    HDfree(data_array);
+
+    return nerrors;
+}
+
+void
 test_file_properties(void)
 {
     hid_t fid;                  /* HDF5 file ID */
@@ -103,7 +715,6 @@ test_file_properties(void)
     const char *filename;
     MPI_Comm comm = MPI_COMM_WORLD;
     MPI_Info info = MPI_INFO_NULL;
-    int mpi_size, mpi_rank;
     herr_t ret;                 /* Generic return value */
 
     filename = (const char *)GetTestParameters();
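create_file() and open_file() both switch the metadata write strategy through the cache configuration; expressed purely through the public file-access-property-list interface, the same idea looks roughly like this (a sketch, not code from the patch):

    /* Sketch: select the distributed metadata write strategy on a FAPL. */
    H5AC_cache_config_t config;

    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    H5Pget_mdc_config(fapl, &config);
    config.metadata_write_strategy = H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED;
    H5Pset_mdc_config(fapl, &config);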
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 7077081..39cfbbc 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -510,7 +510,7 @@ void big_dataset(void)
 
     /* Check that file of the correct size was created */
     file_size = h5_get_file_size(filename, fapl);
-    VRFY((file_size == 2147485792ULL), "File is correct size(~2GB)");
+    VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
 
     /*
      * Create >4GB HDF5 file
@@ -539,7 +539,7 @@ void big_dataset(void)
 
     /* Check that file of the correct size was created */
     file_size = h5_get_file_size(filename, fapl);
-    VRFY((file_size == 4294969440ULL), "File is correct size(~4GB)");
+    VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
 
     /*
      * Create >8GB HDF5 file
@@ -568,7 +568,7 @@ void big_dataset(void)
 
     /* Check that file of the correct size was created */
     file_size = h5_get_file_size(filename, fapl);
-    VRFY((file_size == 8589936736ULL), "File is correct size(~8GB)");
+    VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
 
     /* Close fapl */
     ret = H5Pclose(fapl);
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index c54cb5e..a58452e 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -351,6 +351,11 @@ int main(int argc, char **argv)
     AddTest("split", test_split_comm_access, NULL,
             "dataset using split communicators", PARATESTFILE);
 
+#ifdef PB_OUT /* temporary: disable page buffering when parallel */
+    AddTest("page_buffer", test_page_buffer_access, NULL,
+            "page buffer usage in parallel", PARATESTFILE);
+#endif
+
     AddTest("props", test_file_properties, NULL,
             "Coll Metadata file property settings", PARATESTFILE);
 
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 9838673..0cf5857 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -246,6 +246,7 @@ void collective_group_write(void);
 void independent_group_read(void);
 void test_fapl_mpio_dup(void);
 void test_split_comm_access(void);
+void test_page_buffer_access(void);
 void dataset_atomicity(void);
 void dataset_writeInd(void);
 void dataset_writeAll(void);
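Because the AddTest() call in testphdf5.c is wrapped in #ifdef PB_OUT, the new parallel page buffer test is compiled out by default. Once page buffering is supported in parallel, removing the guard would presumably leave just the plain registration (sketch only):

    AddTest("page_buffer", test_page_buffer_access, NULL,
            "page buffer usage in parallel", PARATESTFILE);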