author     John Mainzer <mainzer@hdfgroup.org>    2017-03-20 18:13:13 (GMT)
committer  John Mainzer <mainzer@hdfgroup.org>    2017-03-20 18:13:13 (GMT)
commit     52ee0344ef077b8109123b83b0677be7f132b4dc (patch)
tree       c78b7d1b038cca384558f2171fbf6e2ff2460757
parent     b359e8f1a2349d177e70d34560d0a089ce37c4c8 (diff)
parent     b7c58f7dfca66d4caa26d7f8b318ad3514b8c46f (diff)
Merge pull request #341 in HDFFV/hdf5 from ~MAINZER/hdf5_jrm:develop to develop
* commit 'b7c58f7dfca66d4caa26d7f8b318ad3514b8c46f':
  Minor code changes to address comments in pull request
  Checkin of additions to cache image parallel test code and associated bug fixes.
-rw-r--r--  src/H5AC.c               |   13
-rw-r--r--  src/H5C.c                |   21
-rw-r--r--  src/H5Fint.c             |   16
-rw-r--r--  src/H5MF.c               |    6
-rw-r--r--  src/H5PB.c               |   12
-rw-r--r--  src/H5PBprivate.h        |    2
-rw-r--r--  testpar/t_cache_image.c  | 2021
7 files changed, 2045 insertions, 46 deletions
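
The library-side fixes below share a single pattern: flush-on-close work is now performed only when the file was opened read/write, since flushing a read-only file would trigger the free space manager settle routines. A minimal illustrative sketch of that guard, reusing the H5F_INTENT()/H5F_ACC_RDWR macros and the H5AC__flush_entries() call exactly as they appear in the H5AC.c hunk that follows (error handling abbreviated; not additional library code):

    #ifdef H5_HAVE_PARALLEL
        /* Flush entries from rank 0 and broadcast the clean list only
         * when the file was opened read/write.  A read-only open must
         * skip the flush, as it would invoke the free space manager
         * settle routines.
         */
        if(H5F_ACC_RDWR & H5F_INTENT(f))
            if(H5AC__flush_entries(f, dxpl_id) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
    #endif /* H5_HAVE_PARALLEL */
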
diff --git a/src/H5AC.c b/src/H5AC.c
index a561852..be41b6a 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -611,6 +611,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
/* Sanity check */
HDassert(f);
+ HDassert(f->shared);
HDassert(f->shared->cache);
#if H5AC_DUMP_STATS_ON_CLOSE
@@ -641,9 +642,17 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- /* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC__flush_entries(f, dxpl_id) < 0)
+ /* If the file was opened R/W, attempt to flush all entries
+ * from rank 0 & Bcast clean list to other ranks.
+ *
+ * Must not flush in the R/O case, as this will trigger the
+ * free space manager settle routines.
+ */
+ if ( ( H5F_ACC_RDWR & H5F_INTENT(f) ) &&
+ ( H5AC__flush_entries(f, dxpl_id) < 0 ) )
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
diff --git a/src/H5C.c b/src/H5C.c
index 120abb8..2ba9f2d 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -761,7 +761,10 @@ H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
#ifdef H5_HAVE_PARALLEL
- if(!image_generated && cache_ptr->aux_ptr != NULL && f->shared->fs_persist) {
+ if ( ( H5F_INTENT(f) & H5F_ACC_RDWR ) &&
+ ( ! image_generated ) &&
+ ( cache_ptr->aux_ptr != NULL ) &&
+ ( f->shared->fs_persist ) ) {
/* If persistent free space managers are enabled, flushing the
* metadata cache may result in the deletion, insertion, and/or
* dirtying of entries.
@@ -7295,14 +7298,20 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
prev_ptr = entry_ptr->aux_prev;
+ if ( ( !(entry_ptr->prefetched_dirty) )
#ifdef H5_HAVE_PARALLEL
- if(!(entry_ptr->coll_access)) {
+ && ( ! (entry_ptr->coll_access) )
#endif /* H5_HAVE_PARALLEL */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-#ifdef H5_HAVE_PARALLEL
+ ) {
+
+ if ( H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+
} /* end if */
-#endif /* H5_HAVE_PARALLEL */
/* we are scanning the clean LRU, so the serialize function
* will not be called on any entry -- thus there is no
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 794be50..444d409 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -1006,8 +1006,15 @@ H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
+ /* Set up I/O info for operation */
+ fio_info.f = f;
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
/* Shutdown the page buffer cache */
- if(H5PB_dest(f) < 0)
+ if(H5PB_dest(&fio_info) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing page buffer cache")
@@ -1027,13 +1034,6 @@ H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush)
f->shared->root_grp = NULL;
} /* end if */
- /* Set up I/O info for operation */
- fio_info.f = f;
- if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
- HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
- if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
- HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
-
/* Destroy other components of the file */
if(H5F__accum_reset(&fio_info, TRUE) < 0)
/* Push error, but keep going*/
diff --git a/src/H5MF.c b/src/H5MF.c
index 358e326..87c910c 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1347,6 +1347,12 @@ HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_r
HDassert(f);
HDassert(H5F_INTENT(f) & H5F_ACC_RDWR);
+ if(f->shared->first_alloc_dealloc) {
+ HDassert(! H5AC_cache_image_pending(f));
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end if */
+
/* Set mapped type, treating global heap as raw data */
map_type = (alloc_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : alloc_type;
diff --git a/src/H5PB.c b/src/H5PB.c
index c67ae59..575903d 100644
--- a/src/H5PB.c
+++ b/src/H5PB.c
@@ -468,7 +468,7 @@ H5PB__dest_cb(void *item, void H5_ATTR_UNUSED *key, void *_op_data)
/*-------------------------------------------------------------------------
* Function: H5PB_dest
*
- * Purpose: destroy the PB on the file.
+ * Purpose: Flush and destroy the PB on the file if it exists.
*
* Return: Non-negative on success/Negative on failure
*
@@ -477,20 +477,26 @@ H5PB__dest_cb(void *item, void H5_ATTR_UNUSED *key, void *_op_data)
*-------------------------------------------------------------------------
*/
herr_t
-H5PB_dest(H5F_t *f)
+H5PB_dest(const H5F_io_info2_t *fio_info)
{
herr_t ret_value = SUCCEED; /* Return value */
+ H5F_t *f; /* file pointer */
FUNC_ENTER_NOAPI(FAIL)
/* Sanity checks */
+ HDassert(fio_info);
+ f = fio_info->f;
HDassert(f);
- /* Destroy page buffer info, if there is any */
+ /* flush and destroy the page buffer, if it exists */
if(f->shared->page_buf) {
H5PB_t *page_buf = f->shared->page_buf;
H5PB_ud1_t op_data; /* Iteration context */
+ if(H5PB_flush(fio_info)<0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTFLUSH, FAIL, "can't flush page buffer")
+
/* Set up context info */
op_data.page_buf = page_buf;
diff --git a/src/H5PBprivate.h b/src/H5PBprivate.h
index 3b5dcae..7dd4071 100644
--- a/src/H5PBprivate.h
+++ b/src/H5PBprivate.h
@@ -89,7 +89,7 @@ typedef struct H5PB_t {
/* General routines */
H5_DLL herr_t H5PB_create(H5F_t *file, size_t page_buffer_size, unsigned page_buf_min_meta_perc, unsigned page_buf_min_raw_perc);
H5_DLL herr_t H5PB_flush(const H5F_io_info2_t *fio_info);
-H5_DLL herr_t H5PB_dest(H5F_t *file);
+H5_DLL herr_t H5PB_dest(const H5F_io_info2_t *fio_info);
H5_DLL herr_t H5PB_add_new_page(H5F_t *f, H5FD_mem_t type, haddr_t page_addr);
H5_DLL herr_t H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, const void *buf);
H5_DLL herr_t H5PB_remove_entry(const H5F_t *f, haddr_t addr);
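
Since H5PB_dest() now flushes the page buffer before tearing it down, callers must pass a const H5F_io_info2_t rather than a bare H5F_t pointer. A minimal sketch of the new calling convention, following the fio_info setup shown in the H5Fint.c hunk above (illustrative only, error handling abbreviated):

    H5F_io_info2_t fio_info;    /* I/O info holding metadata and raw data DXPLs */

    /* Resolve the data transfer property lists before destroying the
     * page buffer, since H5PB_dest() now needs them in order to flush.
     */
    fio_info.f = f;
    if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
        HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
    if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
        HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")

    /* Flush and destroy the page buffer -- a no-op if none was configured */
    if(H5PB_dest(&fio_info) < 0)
        HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing page buffer cache")
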
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 7283fa7..a28af8e 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -25,10 +25,13 @@
#include "cache_common.h"
#include "genall5.h"
+#define TEST_FILES_TO_CONSTRUCT 2
#define CHUNK_SIZE 10
#define DSET_SIZE (40 * CHUNK_SIZE)
#define MAX_NUM_DSETS 256
-
+#define PAR_NUM_DSETS 32
+#define PAGE_SIZE (4 * 1024)
+#define PB_SIZE (64 * PAGE_SIZE)
/* global variable declarations: */
@@ -36,6 +39,7 @@
const char *FILENAMES[] = {
"t_cache_image_00",
"t_cache_image_01",
+ "t_cache_image_02",
NULL
};
@@ -45,28 +49,48 @@ static void create_data_sets(hid_t file_id, int min_dset, int max_dset);
static void delete_data_sets(hid_t file_id, int min_dset, int max_dset);
static void open_hdf5_file(const hbool_t create_file,
- const hbool_t mdci_sbem_expected, const hbool_t read_only,
- const hbool_t set_mdci_fapl, const hbool_t config_fsm,
- const char * hdf_file_name, const unsigned cache_image_flags,
- hid_t * file_id_ptr, H5F_t ** file_ptr_ptr, H5C_t ** cache_ptr_ptr,
- MPI_Comm comm, MPI_Info info, int l_facc_type,
- const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
- const int md_write_strat);
+ const hbool_t mdci_sbem_expected,
+ const hbool_t read_only,
+ const hbool_t set_mdci_fapl,
+ const hbool_t config_fsm,
+ const hbool_t enable_page_buffer,
+ const char * hdf_file_name,
+ const unsigned cache_image_flags,
+ hid_t * file_id_ptr,
+ H5F_t ** file_ptr_ptr,
+ H5C_t ** cache_ptr_ptr,
+ MPI_Comm comm,
+ MPI_Info info,
+ int l_facc_type,
+ const hbool_t all_coll_metadata_ops,
+ const hbool_t coll_metadata_write,
+ const int md_write_strat);
static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
/* local test function declarations */
static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
- hbool_t display);
+ hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display);
static void usage(void);
static unsigned construct_test_file(int test_file_index);
+static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
+ int mpi_size);
+static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
+static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
+static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
+static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
/* top level test function declarations */
+static unsigned verify_cache_image_RO(int file_name_id,
+ int md_write_strat, int mpi_rank);
static unsigned verify_cache_image_RW(int file_name_id,
int md_write_strat, int mpi_rank);
+static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
+ int mpi_rank, int mpi_size);
+
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -187,6 +211,7 @@ construct_test_file(int test_file_index)
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
/* config_fsm */ TRUE,
+ /* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -258,6 +283,7 @@ construct_test_file(int test_file_index)
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
/* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -331,6 +357,7 @@ construct_test_file(int test_file_index)
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
/* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -863,6 +890,8 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
*
* JRM -- 2/1/17
*
+ * Modified function to handle the new enable_page_buffer
+ * parameter.
*-------------------------------------------------------------------------
*/
@@ -872,6 +901,7 @@ open_hdf5_file(const hbool_t create_file,
const hbool_t read_only,
const hbool_t set_mdci_fapl,
const hbool_t config_fsm,
+ const hbool_t enable_page_buffer,
const char * hdf_file_name,
const unsigned cache_image_flags,
hid_t * file_id_ptr,
@@ -901,6 +931,8 @@ open_hdf5_file(const hbool_t create_file,
FALSE,
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+ HDassert(!create_file || config_fsm);
+
if ( pass )
{
/* opening the file both read only and with a cache image
@@ -910,6 +942,7 @@ open_hdf5_file(const hbool_t create_file,
if ( ( create_file && mdci_sbem_expected ) ||
( create_file && read_only ) ||
( config_fsm && !create_file ) ||
+ ( create_file && enable_page_buffer && ! config_fsm ) ||
( hdf_file_name == NULL ) ||
( ( set_mdci_fapl ) && ( cache_image_flags == 0 ) ) ||
( ( set_mdci_fapl ) &&
@@ -1029,9 +1062,32 @@ open_hdf5_file(const hbool_t create_file,
}
}
+ if ( ( pass ) && ( config_fsm ) ) {
+
+ if ( H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_page_size() failed.\n";
+ }
+ }
+
if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ /* setup the page buffer if indicated */
+ if ( ( pass ) && ( enable_page_buffer ) ) {
+
+ if ( H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_page_buffer_size() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
/* set Parallel access with communicator */
@@ -1162,6 +1218,31 @@ open_hdf5_file(const hbool_t create_file,
if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* verify expected page buffer status. At present, page buffering
+ * must be disabled in parallel -- hopefully this will change in the
+ * future.
+ */
+ if ( pass ) {
+
+ if ( ( file_ptr->shared->page_buf ) &&
+ ( ( ! enable_page_buffer ) || ( l_facc_type == FACC_MPIO ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "page buffer unexepectedly enabled.";
+
+ } else if ( ( file_ptr->shared->page_buf == NULL ) &&
+ ( ( enable_page_buffer ) && ( l_facc_type != FACC_MPIO ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "page buffer unexepectedly disabled.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
/* verify expected metadata cache status */
/* get the cache image control structure from the cache, and verify
@@ -1181,6 +1262,7 @@ open_hdf5_file(const hbool_t create_file,
if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
if ( pass ) {
if ( set_mdci_fapl ) {
@@ -1308,6 +1390,1244 @@ open_hdf5_file(const hbool_t create_file,
/*-------------------------------------------------------------------------
+ * Function: par_create_dataset()
+ *
+ * Purpose: Collectively create a chunked dataset, and fill it with
+ * known values.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/4/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_create_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank,
+ int mpi_size)
+{
+ const char * fcn_name = "par_create_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hsize_t chunk_size[3];
+ hid_t status;
+ hid_t dataspace_id = -1;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dcpl_id = -1;
+ hid_t dxpl_id = -1;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+ verbose = (verbose && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* create a dataspace for the chunked dataset */
+ dims[0] = (hsize_t)mpi_size;
+ dims[1] = DSET_SIZE;
+ dims[2] = DSET_SIZE;
+ dataspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( dataspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* set the dataset creation plist to specify that the raw data is
+ * to be partitioned into 1X10X10 element chunks.
+ */
+
+ if ( pass ) {
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( dcpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_CREATE) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ chunk_size[0] = 1;
+ chunk_size[1] = CHUNK_SIZE;
+ chunk_size[2] = CHUNK_SIZE;
+
+ if ( H5Pset_chunk(dcpl_id, 3, chunk_size) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_chunk() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the dataset */
+ if ( pass ) {
+
+ dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
+ dataspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dcreate() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the DXPL for collective I/O */
+ if ( pass ) {
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ if ( dxpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* initialize the dataset with collective writes */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* initialize the slab */
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ data_chunk[0][k][l] = (DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l +
+ dset_num;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)i;
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk H5Sselect_hyperslab() failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* write the chunk to file */
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id,
+ filespace_id, dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dwrite() failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ j += CHUNK_SIZE;
+ }
+
+ i += CHUNK_SIZE;
+ }
+
+ cp++;
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank;
+ offset[1] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[0][k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
+ dset_num, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, dset_num);
+ }
+ }
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the data space */
+ if ( ( pass ) && ( H5Sclose(dataspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(dataspace_id) failed.";
+ }
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ /* close the dataset creation property list */
+ if ( ( pass ) && ( H5Pclose(dcpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dcpl) failed.";
+ }
+
+ /* close the data access property list */
+ if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dxpl) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_create_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_delete_dataset()
+ *
+ * Purpose: Collectively delete the specified dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_delete_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank)
+{
+ const char * fcn_name = "par_delete_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ /* verify the target dataset */
+ if ( pass ) {
+
+ par_verify_dataset(dset_num, file_id, mpi_rank);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* delete the target dataset */
+ if ( pass ) {
+
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Ldelete() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_delete_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_insert_cache_image()
+ *
+ * Purpose: Insert a cache image in the supplied file.
+ *
+ * At present, cache image is not enabled in the parallel case,
+ * so we have to insert the cache image with a serial
+ * process. Do this via a fork and an execv from process 0.
+ * All processes wait until the child process completes, and
+ * then return.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/8/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
+{
+ hbool_t show_progress = FALSE;
+
+ if ( pass ) {
+
+ if ( mpi_rank == 0 ) { /* insert cache image in supplied test file */
+
+ char file_name_idx_str[32];
+ char mpi_size_str[32];
+ int child_status;
+ pid_t child_pid;
+
+ sprintf(file_name_idx_str, "%d", file_name_idx);
+ sprintf(mpi_size_str, "%d", mpi_size);
+
+ child_pid = fork();
+
+ if ( child_pid == 0 ) { /* this is the child process */
+
+ /* fun and games to shut up the compiler */
+ char param0[32] = "t_cache_image";
+ char param1[32] = "ici";
+ char * child_argv[] = {param0,
+ param1,
+ file_name_idx_str,
+ mpi_size_str,
+ NULL};
+
+ /* we may need to play with the path here */
+ if ( execv("t_cache_image", child_argv) == -1 ) {
+
+ HDfprintf(stdout,
+ "execl() of ici process failed. errno = %d(%s)\n",
+ errno, strerror(errno));
+ exit(1);
+ }
+
+ } else if ( child_pid != -1 ) {
+ /* this is the parent process -- wait until child is done */
+ if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
+
+ HDfprintf(stdout, "can't wait on ici process.\n");
+ pass = FALSE;
+
+ } else if ( ! WIFEXITED(child_status) ) {
+
+ HDfprintf(stdout, "ici process hasn't exitied.\n");
+ pass = FALSE;
+
+ } else if ( WEXITSTATUS(child_status) != 0 ) {
+
+ HDfprintf(stdout, "ici process reports failure.\n");
+ pass = FALSE;
+
+ } else if ( show_progress ) {
+
+ HDfprintf(stdout, "cache image insertion complete.\n");
+ }
+ } else { /* fork failed */
+
+ HDfprintf(stdout,
+ "can't create process to insert cache image.\n");
+ pass = FALSE;
+ }
+ }
+ }
+
+ if ( pass ) {
+
+ /* make sure insertion of the cache image is complete
+ * before proceeding
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ return;
+
+} /* par_insert_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_verify_dataset()
+ *
+ * Purpose: Collectively verify the contents of a chunked dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_verify_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank)
+{
+ const char * fcn_name = "par_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hid_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dxpl_id = -1;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+ verbose = (verbose && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* open the dataset */
+
+ dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the DXPL for collective I/O */
+ if ( pass ) {
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ if ( dxpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank;
+ offset[1] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[0][k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
+ dset_num, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, dset_num);
+ }
+ }
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ /* close the data access property list */
+ if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dxpl) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_verify_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: serial_insert_cache_image()
+ *
+ * Purpose: Insert a cache image in the supplied file.
+ *
+ * To populate the cache image, validate the contents
+ * of the file before closing.
+ *
+ * On failure, print an appropriate error message and
+ * return FALSE.
+ *
+ * Return: TRUE if successful, FALSE otherwise.
+ *
+ * Programmer: John Mainzer
+ * 3/8/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+serial_insert_cache_image(int file_name_idx, int mpi_size )
+{
+ const char * fcn_name = "serial_insert_cache_image()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ MPI_Comm dummy_comm = MPI_COMM_WORLD;
+ MPI_Info dummy_info = MPI_INFO_NULL;
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) setup the file name */
+ if ( pass ) {
+
+ HDassert(FILENAMES[file_name_idx]);
+
+ if ( h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT,
+ filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ HDfprintf(stdout, "h5_fixname() failed.\n");
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Open the PHDF5 file with the cache image FAPL entry.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ dummy_comm,
+ /* info */ dummy_info,
+ /* l_facc_type */ 0,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Validate contents of the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ serial_verify_dataset(i, file_id, mpi_size);
+ i++;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return pass;
+
+} /* serial_insert_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: serial_verify_dataset()
+ *
+ * Purpose: Verify the contents of a chunked dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+serial_verify_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_size)
+{
+ const char * fcn_name = "serial_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hid_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* open the dataset */
+
+ dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < mpi_size ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ k = 0;
+ while ( ( pass ) && ( k < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)k;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ for ( m = 0; m < CHUNK_SIZE; m++ )
+ {
+ if ( data_chunk[0][l][m]
+ !=
+ ((DSET_SIZE * DSET_SIZE * i) +
+ (DSET_SIZE * (j + l)) + k + m + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ l, m, data_chunk[0][l][m],
+ ((DSET_SIZE * DSET_SIZE * i) +
+ (DSET_SIZE * (j + l)) + k + m + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d, m = %d\n",
+ dset_num, i, j, k, l, m);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ j, k, dset_num);
+ }
+ }
+ }
+ k += CHUNK_SIZE;
+ }
+ j += CHUNK_SIZE;
+ }
+ i++;
+ }
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* serial_verify_dataset() */
+
+
+/*-------------------------------------------------------------------------
* Function: parse_flags
*
* Purpose: Parse the flags passed to this program, and load the
@@ -1321,50 +2641,80 @@ open_hdf5_file(const hbool_t create_file,
*
*-------------------------------------------------------------------------
*/
-hbool_t
-parse_flags(int argc, char * argv[], hbool_t * setup_ptr, hbool_t display)
+static hbool_t
+parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+ hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display)
{
const char * fcn_name = "parse_flags()";
- const char * (ops[]) = {"setup"};
+ const char * (ops[]) = {"setup", "ici"};
int success = TRUE;
+ HDassert(setup_ptr);
+ HDassert(*setup_ptr == FALSE);
+ HDassert(ici_ptr);
+ HDassert(*ici_ptr == FALSE);
+ HDassert(file_idx_ptr);
+ HDassert(mpi_size_ptr);
+
if ( setup_ptr == NULL ) {
success = FALSE;
HDfprintf(stdout, "%s: bad arg(s) on entry.\n", fcn_name);
}
+
if ( ( success ) &&
- ( ( argc < 1 ) || ( argc > 2 ) ) ) {
+ ( ( argc != 1 ) && ( argc != 2 ) && ( argc != 4 ) ) ) {
success = FALSE;
usage();
}
- if ( success ) {
- if ( argc == 2 ) {
+ if ( ( success ) && ( argc >= 2 ) ) {
- if ( strcmp(argv[1], ops[0]) == 0 ) {
+ if ( strcmp(argv[1], ops[0]) == 0 ) {
- *setup_ptr = TRUE;
+ if ( argc != 2 ) {
+
+ success = FALSE;
+ usage();
} else {
+ *setup_ptr = TRUE;
+
+ }
+ } else if ( strcmp(argv[1], ops[1]) == 0 ) {
+
+ if ( argc != 4 ) {
+
success = FALSE;
usage();
- }
- } else {
- *setup_ptr = FALSE;
+ } else {
+
+ *ici_ptr = TRUE;
+ *file_idx_ptr = atoi(argv[2]);
+ *mpi_size_ptr = atoi(argv[3]);
+
+ }
}
}
if ( ( success ) && ( display ) ) {
if ( *setup_ptr )
+
HDfprintf(stdout, "t_cache_image setup\n");
+
+ else if ( *ici_ptr )
+
+ HDfprintf(stdout, "t_cache_image ici %d %d\n",
+ *file_idx_ptr, *mpi_size_ptr);
+
else
+
HDfprintf(stdout, "t_cache_image\n");
}
@@ -1694,6 +3044,266 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/****************************************************************************/
/*-------------------------------------------------------------------------
+ * Function: verify_cache_image_RO()
+ *
+ * Purpose: Verify that an HDF5 file containing a cache image is
+ * opened R/O and read correctly by PHDF5 with the specified
+ * metadata write strategy.
+ *
+ * Basic cycle of operation is as follows:
+ *
+ * 1) Open the test file created at the beginning of this
+ * test read only.
+ *
+ * Verify that the file contains a cache image.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ *
+ * 2) Verify that the file contains the expected data.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file R/O, and verify that it still contains
+ * a cache image.
+ *
+ * 5) Verify that the file contains the expected data.
+ *
+ * 6) Close the file.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/11/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
+{
+ const char * fcn_name = "verify_cache_image_RO()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ switch(md_write_strat) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ TESTING("parallel CI load test -- proc0 md write -- R/O");
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ TESTING("parallel CI load test -- dist md write -- R/O");
+ break;
+
+ default:
+ TESTING("parallel CI load test -- unknown md write -- R/o");
+ pass = FALSE;
+ break;
+ }
+ }
+
+ show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ filename, sizeof(filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Open the test file created at the beginning of this test.
+ *
+ * Verify that the file contains a cache image.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that only process 0 reads the cache image. */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Verify that the file contains the expected data. */
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file R/O, and verify that it still contains a cache image. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Verify that the file contains the expected data. */
+
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+
+ if ( pass ) {
+
+ PASSED();
+
+ } else {
+
+ H5_FAILED();
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
+ }
+ }
+
+
+ return !pass;
+
+} /* verify_cache_image_RO() */
+
+
+/*-------------------------------------------------------------------------
* Function: verify_cache_image_RW()
*
* Purpose: Verify that a HDF5 file containing a cache image is
@@ -1747,7 +3357,6 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
int cp = 0;
- int i;
pass = TRUE;
@@ -1808,6 +3417,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
/* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -1880,6 +3490,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
/* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -1969,6 +3580,338 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
} /* verify_cache_image_RW() */
+/*****************************************************************************
+ *
+ * Function: smoke_check_1()
+ *
+ * Purpose: Initial smoke check to verify correct behaviour of cache
+ * image in combination with parallel.
+ *
+ * As cache image is currently disabled in the parallel case,
+ * we construct a test file in parallel, verify it in serial
+ * and generate a cache image in passing, and then verify
+ * it again in parallel.
+ *
+ * In passing, also verify that page buffering is silently
+ * disabled in the parallel case. Needless to say, this part
+ * of the test will have to be re-worked when and if page
+ * buffering is supported in parallel.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 3/6/17
+ *
+ *****************************************************************************/
+static hbool_t
+smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
+{
+ const char * fcn_name = "smoke_check_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ int test_file_index = 2;
+ h5_stat_size_t file_size;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ TESTING("parallel cache image smoke check 1");
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ HDassert(FILENAMES[test_file_index]);
+
+ if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
+ filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a PHDF5 file without the cache image FAPL entry.
+ *
+ * Verify that a cache image is not requested
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ TRUE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_create_dataset(i, file_id, mpi_rank, mpi_size);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Verify the datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+
+ /* 4) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Insert a cache image into the file */
+
+ if ( pass ) {
+
+ par_insert_cache_image(test_file_index, mpi_rank, mpi_size);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Open the file R/O */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Verify the datasets in the file backwards */
+
+ i = num_dsets - 1;
+ while ( ( pass ) && ( i >= 0 ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i--;
+ }
+
+
+ /* 8) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.";
+
+ }
+ }
+
+
+ /* 9) Open the file */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Verify the datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Delete the datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_delete_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.";
+
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Get the size of the file. Verify that it is less
+ * than 20 KB. Without deletions and persistent free
+ * space managers, the file size is about 30 MB, so this
+ * is sufficient to verify that the persistent free
+ * space managers are more or less doing their job.
+ *
+ * Note that this test will have to change if we use
+ * a larger page size.
+ */
+ if ( pass ) {
+
+ if ( ( file_size = h5_get_file_size(filename, H5P_DEFAULT) ) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_get_file_size() failed.";
+
+ } else if ( file_size > 20 * 1024 ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpectedly large file size.";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Delete the file */
+
+ if ( pass ) {
+
+ /* wait for everyone to close the file */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+
+ if ( pass ) {
+
+ PASSED();
+
+ } else {
+
+ H5_FAILED();
+
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+ }
+
+ return !pass;
+
+} /* smoke_check_1() */
+
+
/*-------------------------------------------------------------------------
* Function: main
*
@@ -1999,22 +3942,26 @@ int
main(int argc, char **argv)
{
hbool_t setup = FALSE;
+ hbool_t ici = FALSE;
unsigned nerrs = 0;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int file_idx;
int i;
int mpi_size;
int mpi_rank;
- if ( ! parse_flags(argc, argv, &setup, FALSE) )
+ if ( ! parse_flags(argc, argv, &setup, &ici, &file_idx, &mpi_size, FALSE) )
exit(1); /* exit now if unable to parse flags */
- if ( setup ) { /* construct test file and exit */
+ if ( setup ) { /* construct test files and exit */
H5open();
HDfprintf(stdout, "Constructing test files: \n");
HDfflush(stdout);
i = 0;
- while ( FILENAMES[i] != NULL ) {
+ while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
HDfflush(stdout);
@@ -2032,11 +3979,26 @@ main(int argc, char **argv)
}
i++;
}
+
HDfprintf(stdout, "Test file construction complete.\n");
exit(0);
+
+ } else if ( ici ) {
+
+ if ( serial_insert_cache_image(file_idx, mpi_size) ) {
+
+ exit(0);
+
+ } else {
+
+ HDfprintf(stderr, "\n\nCache image insertion failed.\n");
+ HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
+ exit(1);
+ }
}
HDassert(!setup);
+ HDassert(!ici);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -2118,14 +4080,21 @@ main(int argc, char **argv)
}
}
- /* can't start test until test file exists */
+ /* can't start test until test files exist */
MPI_Barrier(MPI_COMM_WORLD);
+
+ nerrs += verify_cache_image_RO(0,
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
+#if 1
+ nerrs += verify_cache_image_RO(1,
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += verify_cache_image_RW(0,
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
nerrs += verify_cache_image_RW(1,
H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
-
+ nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
+#endif
finish:
/* make sure all processes are finished before final report, cleanup