author     Dana Robinson <derobins@hdfgroup.org>   2021-05-07 19:46:03 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>   2021-05-07 19:46:03 (GMT)
commit     2f92cb5d19c0040326061fd2010bfd56c908879c (patch)
tree       6a5df93bd13345e96e89d25ef0f9c49fcf956be9
parent     304df18dab6e35e240399f5da538c2f497fbaa59 (diff)
Normalization of parallel
-rw-r--r--  src/H5ACmpio.c                 17
-rw-r--r--  src/H5Dmpio.c                  32
-rw-r--r--  testpar/t_2Gio.c               19
-rw-r--r--  testpar/t_bigio.c             130
-rw-r--r--  testpar/t_cache.c              28
-rw-r--r--  testpar/t_cache_image.c       380
-rw-r--r--  testpar/t_dset.c               73
-rw-r--r--  testpar/t_filters_parallel.c  134
-rw-r--r--  testpar/t_mdset.c             153
-rw-r--r--  testpar/t_pread.c              52
-rw-r--r--  testpar/t_prop.c              189
-rw-r--r--  testpar/t_shapesame.c          34
-rw-r--r--  testpar/testphdf5.c             3
-rw-r--r--  testpar/testphdf5.h            13
14 files changed, 509 insertions, 748 deletions
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index b0b54eb..afc15d1 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -1271,7 +1271,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f)
if (aux_ptr->write_done)
(aux_ptr->write_done)();
- /* to prevent "messages from the past" we must synchronize all
+ /* To prevent "messages from the past" we must synchronize all
* processes again before we go on.
*/
if (MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
@@ -1514,7 +1514,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive clean list")
if (num_entries > 0)
- /* mark the indicated entries as clean */
+ /* Mark the indicated entries as clean */
if (H5C_mark_entries_as_clean(f, num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
@@ -1848,6 +1848,8 @@ done:
* Programmer: John Mainzer
* April 28, 2010
*
+ * Changes: None.
+ *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1876,9 +1878,12 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
* However, when flushing from within the close operation from a file,
* it's possible to skip this barrier (on the second flush of the cache).
*/
- if (!H5CX_get_mpi_file_flushing())
+ if (!H5CX_get_mpi_file_flushing()) {
+
if (MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
+ }
/* Flush data to disk, from rank 0 process */
if (aux_ptr->mpi_rank == 0) {
@@ -2075,9 +2080,13 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
/* Sanity checks */
HDassert(f != NULL);
+
cache_ptr = f->shared->cache;
+
HDassert(cache_ptr != NULL);
+
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
+
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) ||
@@ -2157,6 +2166,7 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC__run_sync_point() */
@@ -2252,7 +2262,6 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates, haddr_t *ca
* request to flush all items and something was protected.
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
* Aug 22 2009
*
*-------------------------------------------------------------------------
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 16a8221..448e92d 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -413,7 +413,7 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, co
* collective I/O
*/
if (MPI_SUCCESS !=
- (mpi_code = MPI_Allreduce(&local_cause, &global_cause, 2, MPI_UNSIGNED, MPI_BOR, io_info->comm)))
+ (mpi_code = MPI_Allreduce(local_cause, global_cause, 2, MPI_UNSIGNED, MPI_BOR, io_info->comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
} /* end else */
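[Note: The hunk above drops the address-of operators because local_cause and global_cause are (two-element) arrays: an array name already decays to a pointer to its first element, which is the form MPI_Allreduce expects when count is 2, and &array would have a different pointer type even though it names the same address. A minimal sketch of reducing a small flag array across ranks, assuming an initialized MPI environment:

    #include <mpi.h>

    /* Combine per-rank "reason" bitmasks so every rank sees the union.
     * local[]/global[] stand in for the local/global-cause flags. */
    unsigned local[2]  = {0x4u, 0x0u};   /* example per-rank flags */
    unsigned global[2] = {0u, 0u};

    if (MPI_SUCCESS != MPI_Allreduce(local, global, 2, MPI_UNSIGNED,
                                     MPI_BOR, MPI_COMM_WORLD))
        /* handle the error */ ;
]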
@@ -1094,7 +1094,7 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
#ifdef H5D_DEBUG
if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "total_chunks = %Zu, num_chunk = %Zu\n", total_chunks, num_chunk);
+ HDfprintf(H5DEBUG(D), "total_chunks = %zu, num_chunk = %zu\n", total_chunks, num_chunk);
#endif
/* Set up MPI datatype for chunks selected */
@@ -1574,7 +1574,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
chunk_addr = (haddr_t *)H5MM_calloc(total_chunk * sizeof(haddr_t));
#ifdef H5D_DEBUG
if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "total_chunk %Zu\n", total_chunk);
+ HDfprintf(H5DEBUG(D), "total_chunk %zu\n", total_chunk);
#endif
/* Obtain IO option for each chunk */
@@ -1608,7 +1608,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
#ifdef H5D_DEBUG
if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "mpi_rank = %d, chunk index = %Zu\n", mpi_rank, u);
+ HDfprintf(H5DEBUG(D), "mpi_rank = %d, chunk index = %zu\n", mpi_rank, u);
#endif
/* Get the chunk info for this chunk, if there are elements selected */
chunk_info = fm->select_chunk[u];
@@ -1628,7 +1628,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
if (chunk_io_option[u] == H5D_CHUNK_IO_MODE_COL) {
#ifdef H5D_DEBUG
if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "inside collective chunk IO mpi_rank = %d, chunk index = %Zu\n",
+ HDfprintf(H5DEBUG(D), "inside collective chunk IO mpi_rank = %d, chunk index = %zu\n",
mpi_rank, u);
#endif
@@ -1667,7 +1667,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
else { /* possible independent IO for this chunk */
#ifdef H5D_DEBUG
if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "inside independent IO mpi_rank = %d, chunk index = %Zu\n", mpi_rank,
+ HDfprintf(H5DEBUG(D), "inside independent IO mpi_rank = %d, chunk index = %zu\n", mpi_rank,
u);
#endif
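[Note: The %Zu conversions replaced throughout these hunks are a nonstandard extension accepted by some C libraries (old glibc); C99 defines %zu as the portable conversion for size_t. A minimal illustration, assuming nothing beyond the C standard library:

    #include <stdio.h>
    #include <stddef.h>

    size_t total_chunks = 42;

    /* %zu is the C99-standard conversion for size_t; %Zu compiles
     * only with some libc implementations and warns elsewhere. */
    printf("total_chunks = %zu\n", total_chunks);
]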
@@ -2838,7 +2838,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
"unable to allocate number of assigned chunks array")
for (i = 0; i < shared_chunks_info_array_num_entries;) {
- H5D_filtered_collective_io_info_t chunk_entry;
+ H5D_filtered_collective_io_info_t *chunk_entry;
haddr_t last_seen_addr = shared_chunks_info_array[i].chunk_states.chunk_current.offset;
size_t set_begin_index = i;
size_t num_writers = 0;
@@ -2846,17 +2846,17 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
/* Process each set of duplicate entries caused by another process writing to the same chunk */
do {
- chunk_entry = shared_chunks_info_array[i];
+ chunk_entry = &shared_chunks_info_array[i];
- send_counts[chunk_entry.owners.original_owner] += (int)sizeof(chunk_entry);
+ send_counts[chunk_entry->owners.original_owner] += (int)sizeof(*chunk_entry);
/* The new owner of the chunk is determined by the process
* writing to the chunk which currently has the least amount
* of chunks assigned to it
*/
- if (num_assigned_chunks_array[chunk_entry.owners.original_owner] <
+ if (num_assigned_chunks_array[chunk_entry->owners.original_owner] <
num_assigned_chunks_array[new_chunk_owner])
- new_chunk_owner = chunk_entry.owners.original_owner;
+ new_chunk_owner = chunk_entry->owners.original_owner;
num_writers++;
} while (++i < shared_chunks_info_array_num_entries &&
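[Note: Switching chunk_entry from a struct copy to a pointer avoids copying the whole entry on every loop iteration and makes sizeof(*chunk_entry) track the element type automatically. A hedged sketch of the pattern with a stand-in struct (entry_t and its fields are illustrative, not HDF5's actual layout):

    typedef struct { int original_owner; /* ...more fields... */ } entry_t;

    entry_t entries[16];          /* assume original_owner is in [0, 4) */
    int     send_counts[4] = {0};

    for (size_t i = 0; i < 16; i++) {
        entry_t *entry = &entries[i];     /* pointer, not a struct copy */
        send_counts[entry->original_owner] += (int)sizeof(*entry);
    }
]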
@@ -2907,6 +2907,8 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
sizeof(unsigned char *))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate modification data buffer array")
+ /* Perform all the sends on the chunks that this rank doesn't own */
+ /* (Sends and recvs must be two separate loops, to avoid deadlock) */
for (i = 0, last_assigned_idx = 0; i < *local_chunk_array_num_entries; i++) {
H5D_filtered_collective_io_info_t *chunk_entry = &local_chunk_array[i];
@@ -2965,7 +2967,13 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
num_send_requests++;
} /* end if */
- else {
+ } /* end for */
+
+ /* Perform all the recvs on the chunks this rank owns */
+ for (i = 0, last_assigned_idx = 0; i < *local_chunk_array_num_entries; i++) {
+ H5D_filtered_collective_io_info_t *chunk_entry = &local_chunk_array[i];
+
+ if (mpi_rank == chunk_entry->owners.new_owner) {
/* Allocate all necessary buffers for an asynchronous receive operation */
if (chunk_entry->num_writers > 1) {
MPI_Message message;
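[Note: The comment added above explains why the sends and receives become two loops: if each rank interleaved blocking receives with its sends, two ranks could each block waiting on an operation the other has not posted yet. A minimal sketch of the two-phase pattern with nonblocking sends; NPEERS, send_buf, recv_buf, counts, and peer are illustrative stand-ins for the per-chunk bookkeeping in the real code:

    #include <mpi.h>

    enum { NPEERS = 4 };                 /* illustrative peer count */
    MPI_Request reqs[NPEERS];

    /* Phase 1: post every send without blocking. */
    for (int p = 0; p < NPEERS; p++)
        MPI_Isend(send_buf[p], counts[p], MPI_BYTE, peer[p], 0,
                  MPI_COMM_WORLD, &reqs[p]);

    /* Phase 2: only now block on the receives; no two ranks can be
     * stuck waiting on each other's unposted operations. */
    for (int p = 0; p < NPEERS; p++)
        MPI_Recv(recv_buf[p], counts[p], MPI_BYTE, peer[p], 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    MPI_Waitall(NPEERS, reqs, MPI_STATUSES_IGNORE);
]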
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index 5a7936b..d5b9038 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -578,8 +578,9 @@ MpioTest2G(MPI_Comm comm)
MPI_Comm_rank(comm, &mpi_rank);
if (mpi_rank == 0) {
- HDprintf("Using %d process on dataset shape [%llu, %llu, %llu]\n", mpi_size, shape[0], shape[1],
- shape[2]);
+ HDprintf("Using %d process on dataset shape "
+ "[%" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE "]\n",
+ mpi_size, shape[0], shape[1], shape[2]);
}
/*
@@ -606,7 +607,7 @@ MpioTest2G(MPI_Comm comm)
tot_size_bytes *= shape[i];
}
if (mpi_rank == 0) {
- HDprintf("Dataset of %llu bytes\n", tot_size_bytes);
+ HDprintf("Dataset of %zu bytes\n", tot_size_bytes);
}
filespace = H5Screate_simple(3, shape, NULL);
VRFY((filespace >= 0), "H5Screate_simple succeeded");
@@ -681,7 +682,7 @@ MpioTest2G(MPI_Comm comm)
H5Fclose(file_id);
free(data);
- HDprintf("Proc %d - MpioTest2G test succeeded\n", mpi_rank, data_size_bytes);
+ HDprintf("Proc %d - MpioTest2G test succeeded\n", mpi_rank);
if (mpi_rank == 0)
HDremove(FILENAME[1]);
@@ -3861,9 +3862,7 @@ actual_io_mode_tests(void)
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-#define DSET_NOCOLCAUSE "nocolcause"
-#define NELM 2
-#define FILE_EXTERNAL "nocolcause_extern.data"
+#define FILE_EXTERNAL "nocolcause_extern.data"
static void
test_no_collective_cause_mode(int selection_mode)
{
@@ -4422,9 +4421,9 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
/* test_no_collective_cause_mode (TEST_FILTERS); */
test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
#endif /* LATER */
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 60faf68..ed99fc4 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -131,12 +131,13 @@ point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], s
}
if (VERBOSE_MED) {
- HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "total datapoints=%" PRIuHSIZE "\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+ block[0] * block[1] * count[0] * count[1]);
k = 0;
for (i = 0; i < num_points; i++) {
HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
@@ -157,15 +158,15 @@ dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
/* print the column heading */
HDprintf("%-8s", "Cols:");
for (j = 0; j < block[1]; j++) {
- HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ HDprintf("%3" PRIuHSIZE " ", start[1] + j);
}
HDprintf("\n");
/* print the slab data */
for (i = 0; i < block[0]; i++) {
- HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
for (j = 0; j < block[1]; j++) {
- HDprintf("%llu ", *dataptr++);
+ HDprintf("%" PRIuHSIZE " ", *dataptr++);
}
HDprintf("\n");
}
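[Note: The PRIuHSIZE macro used in these hunks is HDF5's analogue of the <inttypes.h> PRIu64 family: it expands to the correct printf length modifier for hsize_t on the current platform, where the old (unsigned long) casts could silently truncate on LLP64 systems (Windows, where unsigned long is 32-bit). A small self-contained example:

    #include <stdio.h>
    #include "hdf5.h"   /* hsize_t and PRIuHSIZE */

    int main(void)
    {
        hsize_t dims[2] = {1024, 2048};

        /* PRIuHSIZE expands to e.g. "llu" or "lu" as appropriate. */
        printf("dims = [%" PRIuHSIZE ", %" PRIuHSIZE "]\n",
               dims[0], dims[1]);
        return 0;
    }
]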
@@ -184,10 +185,11 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
HDprintf("verify_data dumping:::\n");
- HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
HDprintf("original values:\n");
dataset_print(start, block, original);
HDprintf("compared values:\n");
@@ -199,9 +201,10 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
for (j = 0; j < block[1]; j++) {
if (*dataset != *original) {
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
- (unsigned long)(j + start[1]), *(original), *(dataset));
+ HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+ "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+ "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+ i, j, i + start[0], j + start[1], *(original), *(dataset));
}
dataset++;
original++;
@@ -1101,7 +1104,7 @@ static void
single_rank_independent_io(void)
{
if (mpi_rank_g == 0)
- HDprintf("single_rank_independent_io\n");
+ HDprintf("\nSingle Rank Independent I/O\n");
if (MAIN_PROCESS) {
hsize_t dims[] = {LARGE_DIM};
@@ -1220,8 +1223,6 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1247,7 +1248,7 @@ coll_chunk1(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk1\n");
+ HDprintf("\nCollective chunk I/O Test #1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1300,7 +1301,7 @@ coll_chunk2(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk2\n");
+ HDprintf("\nCollective chunk I/O Test #2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1326,8 +1327,6 @@ coll_chunk2(void)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1354,7 +1353,7 @@ coll_chunk3(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk3\n");
+ HDprintf("\nCollective chunk I/O Test #3\n");
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1382,17 +1381,9 @@ coll_chunk3(void)
*
* Failure: -1
*
- * Modifications:
- * Remove invalid temporary property checkings for API_LINK_HARD and
- * API_LINK_TRUE cases.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
- *
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1842,55 +1833,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
HDfree(data_origin1);
}
-/*****************************************************************************
- *
- * Function: do_express_test()
- *
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
- *
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
- *
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
- *
- * Failure: -1
- *
- * Programmer: JRM -- 4/25/06
- *
- *****************************************************************************/
-static int
-do_express_test(int world_mpi_rank)
-{
- int express_test;
- int max_express_test;
- int result;
-
- express_test = GetTestExpress();
-
- result =
- MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
-
- if (result != MPI_SUCCESS) {
- nerrors++;
- max_express_test = -1;
- if (VERBOSE_MED && (world_mpi_rank == 0)) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
- }
- }
-
- return (max_express_test);
-
-} /* do_express_test() */
-
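[Note: The deleted do_express_test() embodied a pattern worth keeping in mind: when an input such as an environment variable can differ across ranks, an MPI_Allreduce with MPI_MAX lets every rank adopt the same, most conservative value. A minimal sketch, assuming MPI is initialized; get_local_setting() is a stand-in for GetTestExpress():

    int local  = get_local_setting();   /* may differ per rank */
    int agreed = -1;

    if (MPI_SUCCESS != MPI_Allreduce(&local, &agreed, 1, MPI_INT,
                                     MPI_MAX, MPI_COMM_WORLD))
        agreed = -1;                    /* every rank sees the failure */
]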
int
main(int argc, char **argv)
{
- int ExpressMode = 0;
- hsize_t newsize = 1048576;
+ hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
@@ -1899,9 +1845,8 @@ main(int argc, char **argv)
* that we try to ensure that our bigio handling is actually
* envoked and tested.
*/
- if (newsize != oldsize) {
+ if (newsize != oldsize)
bigcount = newsize * 2;
- }
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
@@ -1912,34 +1857,25 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("Failed to turn off atexit processing. Continue.\n");
- };
/* set alarm. */
ALARM_ON;
- ExpressMode = do_express_test(mpi_rank_g);
-
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
dataset_big_read();
MPI_Barrier(MPI_COMM_WORLD);
- if (ExpressMode > 0) {
- if (mpi_rank_g == 0)
- HDprintf("***Express test mode on. Several tests are skipped\n");
- }
- else {
- coll_chunk1();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk2();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk3();
- MPI_Barrier(MPI_COMM_WORLD);
- single_rank_independent_io();
- }
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+ single_rank_independent_io();
/* turn off alarm */
ALARM_OFF;
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index a8b6d47..4cf1139 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -1467,7 +1467,8 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else if (data[target_index].len != mssg_ptr->len) {
@@ -1475,7 +1476,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
data[target_index].len, mssg_ptr->len);
}
}
@@ -1484,7 +1485,9 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: proc %d read invalid entry. idx/base_addr = %d/%a.\n",
+ HDfprintf(stdout,
+ "%d:%s: proc %d read invalid entry. "
+ "idx/base_addr = %d/%" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg_ptr->src, target_index, data[target_index].base_addr);
}
}
@@ -1653,7 +1656,8 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else if (data[target_index].len != mssg_ptr->len) {
@@ -1919,7 +1923,8 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else {
@@ -2007,7 +2012,8 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else {
@@ -2488,10 +2494,10 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
+ HDfprintf(stdout, "%d:%s: mssg.base_addr = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg.base_addr);
HDfprintf(stdout,
- "%d:%s: entry_ptr->base_addr = %a.\n",
+ "%d:%s: entry_ptr->base_addr = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC,
entry_ptr->base_addr);
}
@@ -2501,7 +2507,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.len != entry_ptr->len.\n",
world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
+ HDfprintf(stdout, "%d:%s: mssg.len = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg.len);
}
@@ -4296,8 +4302,8 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n", world_mpi_rank,
- FUNC, (long long)addr, reported_entry_reads, expected_entry_reads);
+ HDfprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n",
+ world_mpi_rank, FUNC, addr, reported_entry_reads, expected_entry_reads);
}
}
}
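[Note: Throughout this file the old %a conversion, an HDF5-private extension understood only by the library's internal HDfprintf, gives way to standard-style PRIuHADDR/PRIxHADDR macros for haddr_t, so the format strings also work with a plain fprintf. For example:

    #include <stdio.h>
    #include "hdf5.h"   /* haddr_t, PRIuHADDR, PRIxHADDR */

    haddr_t addr = 4096;

    fprintf(stdout, "addr = %" PRIuHADDR " (0x%" PRIxHADDR ")\n",
            addr, addr);
]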
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 1fbfbd4..cb75c48 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -53,9 +53,6 @@ static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
/* local test function declarations */
-static hbool_t parse_flags(int argc, char *argv[], hbool_t *setup_ptr, hbool_t *ici_ptr, int *file_idx_ptr,
- int *mpi_size_ptr, hbool_t display);
-static void usage(void);
static unsigned construct_test_file(int test_file_index);
static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size);
static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
@@ -1742,13 +1739,6 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
} /* par_delete_dataset() */
-/* This test uses many POSIX things that are not available on
- * Windows. We're using a check for fork(2) here as a proxy for
- * all POSIX/Unix/Linux things until this test can be made
- * more platform-independent.
- */
-#ifdef H5_HAVE_FORK
-
/*-------------------------------------------------------------------------
* Function: par_insert_cache_image()
*
@@ -1780,63 +1770,14 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
static void
par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
{
- hbool_t show_progress = FALSE;
-
if (pass) {
if (mpi_rank == 0) { /* insert cache image in supplied test file */
- char file_name_idx_str[32];
- char mpi_size_str[32];
- int child_status;
- pid_t child_pid;
-
- HDsprintf(file_name_idx_str, "%d", file_name_idx);
- HDsprintf(mpi_size_str, "%d", mpi_size);
-
- child_pid = fork();
-
- if (child_pid == 0) { /* this is the child process */
-
- /* fun and games to shutup the compiler */
- char param0[32] = "t_cache_image";
- char param1[32] = "ici";
- char *child_argv[] = {param0, param1, file_name_idx_str, mpi_size_str, NULL};
-
- /* we may need to play with the path here */
- if (execv("t_cache_image", child_argv) == -1) {
-
- HDfprintf(stdout, "execl() of ici process failed. errno = %d(%s)\n", errno,
- strerror(errno));
- HDexit(1);
- }
- }
- else if (child_pid != -1) {
- /* this is the parent process -- wait until child is done */
- if (-1 == waitpid(child_pid, &child_status, WUNTRACED)) {
-
- HDfprintf(stdout, "can't wait on ici process.\n");
- pass = FALSE;
- }
- else if (!WIFEXITED(child_status)) {
-
- HDfprintf(stdout, "ici process hasn't exitied.\n");
- pass = FALSE;
- }
- else if (WEXITSTATUS(child_status) != 0) {
-
- HDfprintf(stdout, "ici process reports failure.\n");
- pass = FALSE;
- }
- else if (show_progress) {
-
- HDfprintf(stdout, "cache image insertion complete.\n");
- }
- }
- else { /* fork failed */
-
- HDfprintf(stdout, "can't create process to insert cache image.\n");
- pass = FALSE;
+ if (!serial_insert_cache_image(file_name_idx, mpi_size)) {
+ HDfprintf(stderr, "\n\nCache image insertion failed.\n");
+ HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
+ HDexit(EXIT_FAILURE);
}
}
}
@@ -1852,15 +1793,6 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
return;
} /* par_insert_cache_image() */
-#else /* H5_HAVE_FORK */
-
-static void
-par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
-{
- return;
-} /* par_insert_cache_image() */
-
-#endif /* H5_HAVE_FORK */
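[Note: With the fork()/execv() helper gone, rank 0 now calls the serial routine in-process and the other ranks simply wait for it, which also removes the Windows-unfriendly POSIX dependency. A sketch of the replacement pattern; do_serial_step() stands in for serial_insert_cache_image():

    if (mpi_rank == 0) {
        if (!do_serial_step(file_name_idx, mpi_size)) {
            HDfprintf(stderr, "serial step failed: \"%s\"\n", failure_mssg);
            HDexit(EXIT_FAILURE);
        }
    }
    /* No rank may touch the file until rank 0 has finished. */
    MPI_Barrier(MPI_COMM_WORLD);
]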
/*-------------------------------------------------------------------------
* Function: par_verify_dataset()
@@ -2461,158 +2393,6 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
} /* serial_verify_dataset() */
/*-------------------------------------------------------------------------
- * Function: parse_flags
- *
- * Purpose: Parse the flags passed to this program, and load the
- * values into the supplied field.
- *
- * Return: Success: 1
- * Failure: 0
- *
- * Programmer: J Mainzer
- * 4/28/11
- *
- *-------------------------------------------------------------------------
- */
-static hbool_t
-parse_flags(int argc, char *argv[], hbool_t *setup_ptr, hbool_t *ici_ptr, int *file_idx_ptr,
- int *mpi_size_ptr, hbool_t display)
-{
- const char *fcn_name = "parse_flags()";
- const char *(ops[]) = {"setup", "ici"};
- int success = TRUE;
-
- HDassert(setup_ptr);
- HDassert(*setup_ptr == FALSE);
- HDassert(ici_ptr);
- HDassert(*ici_ptr == FALSE);
- HDassert(file_idx_ptr);
- HDassert(mpi_size_ptr);
-
- if (setup_ptr == NULL) {
-
- success = FALSE;
- HDfprintf(stdout, "%s: bad arg(s) on entry.\n", fcn_name);
- }
-
- if ((success) && ((argc != 1) && (argc != 2) && (argc != 4))) {
-
- success = FALSE;
- usage();
- }
-
- if ((success) && (argc >= 2)) {
-
- if (strcmp(argv[1], ops[0]) == 0) {
-
- if (argc != 2) {
-
- success = FALSE;
- usage();
- }
- else {
-
- *setup_ptr = TRUE;
- }
- }
- else if (strcmp(argv[1], ops[1]) == 0) {
-
- if (argc != 4) {
-
- success = FALSE;
- usage();
- }
- else {
-
- *ici_ptr = TRUE;
- *file_idx_ptr = atoi(argv[2]);
- *mpi_size_ptr = atoi(argv[3]);
- }
- }
- }
-
- if ((success) && (display)) {
-
- if (*setup_ptr)
-
- HDfprintf(stdout, "t_cache_image setup\n");
-
- else if (*ici_ptr)
-
- HDfprintf(stdout, "t_cache_image ici %d %d\n", *file_idx_ptr, *mpi_size_ptr);
-
- else
-
- HDfprintf(stdout, "t_cache_image\n");
- }
-
- return (success);
-
-} /* parse_flags() */
-
-/*-------------------------------------------------------------------------
- * Function: usage
- *
- * Purpose: Display a brief message describing the purpose and use
- * of the program.
- *
- * Return: void
- *
- * Programmer: John Mainzer
- * 4/28/11
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-void
-usage(void)
-{
- const char *s[] = {
- "\n",
- "t_cache_image:\n",
- "\n",
- "Run the parallel cache image tests. \n"
- "\n"
- "In general, this program is run via MPI. However, at present, files\n"
- "with cache images can only be constructed by serial processes.\n",
- "\n",
- "To square this circle, one process in the parallel computation \n"
- "forks a serial version of the test program to handle this detail.\n",
- "The \"setup\" parameter indicates that t_cache_image is being \n",
- "invokde for this purpose.\n",
- "\n",
- "Similarly, only a serial process can add a cache image to an\n",
- "existing file.\n",
- "\n",
- "Here again, one process forks a serial version of the test program\n",
- "with the \"ici\" parameter.\n"
- "\n",
- "usage: t_cache_image [setup|ici m n]\n",
- "\n",
- "where:\n",
- "\n",
- " setup parameter forces creation of test file\n",
- "\n",
- " ici parameter forces insertion of a cache image into the \n",
- " m th test file, created by a parallel computation with .\n",
- " n processes\n",
- "\n",
- "Returns 0 on success, 1 on failure.\n",
- "\n",
- NULL,
- };
- int i = 0;
-
- while (s[i] != NULL) {
- HDfprintf(stdout, "%s", s[i]);
- i++;
- }
-
- return;
-} /* usage() */
-
-/*-------------------------------------------------------------------------
* Function: verify_data_sets()
*
* Purpose: If pass is TRUE on entry, verify that the data sets in the
@@ -3116,8 +2896,9 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if (show_progress)
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
}
}
@@ -3400,8 +3181,9 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if (show_progress)
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
}
}
@@ -3811,13 +3593,6 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
} /* smoke_check_1() */
-/* This test uses many POSIX things that are not available on
- * Windows. We're using a check for fork(2) here as a proxy for
- * all POSIX/Unix/Linux things until this test can be made
- * more platform-independent.
- */
-#ifdef H5_HAVE_FORK
-
/*-------------------------------------------------------------------------
* Function: main
*
@@ -3847,65 +3622,12 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
int
main(int argc, char **argv)
{
- hbool_t setup = FALSE;
- hbool_t ici = FALSE;
unsigned nerrs = 0;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- int file_idx;
- int i;
int mpi_size;
int mpi_rank;
- if (!parse_flags(argc, argv, &setup, &ici, &file_idx, &mpi_size, FALSE))
- exit(1); /* exit now if unable to parse flags */
-
- if (setup) { /* construct test files and exit */
-
- H5open();
- HDfprintf(stdout, "Constructing test files: \n");
- HDfflush(stdout);
-
- i = 0;
- while ((FILENAMES[i] != NULL) && (i < TEST_FILES_TO_CONSTRUCT)) {
-
- HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
- HDfflush(stdout);
- construct_test_file(i);
-
- if (pass) {
-
- HDprintf("done.\n");
- HDfflush(stdout);
- }
- else {
-
- HDprintf("failed.\n");
- HDexit(1);
- }
- i++;
- }
-
- HDfprintf(stdout, "Test file construction complete.\n");
- HDexit(0);
- }
- else if (ici) {
-
- if (serial_insert_cache_image(file_idx, mpi_size)) {
-
- HDexit(0);
- }
- else {
-
- HDfprintf(stderr, "\n\nCache image insertion failed.\n");
- HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
- HDexit(1);
- }
- }
-
- HDassert(!setup);
- HDassert(!ici);
-
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -3915,9 +3637,8 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
- };
H5open();
@@ -3929,71 +3650,45 @@ main(int argc, char **argv)
}
if (mpi_size < 2) {
-
- if (mpi_rank == 0) {
-
+ if (mpi_rank == 0)
HDprintf(" Need at least 2 processes. Exiting.\n");
- }
goto finish;
}
if (mpi_rank == 0) { /* create test files */
+ int i;
- int child_status;
- pid_t child_pid;
-
- child_pid = fork();
-
- if (child_pid == 0) { /* this is the child process */
-
- /* fun and games to shutup the compiler */
- char param0[32] = "t_cache_image";
- char param1[32] = "setup";
- char *child_argv[] = {param0, param1, NULL};
-
- /* we may need to play with the path here */
- if (execv("t_cache_image", child_argv) == -1) {
-
- HDfprintf(stdout, "execl() of setup process failed. errno = %d(%s)\n", errno,
- strerror(errno));
- HDexit(1);
- }
- }
- else if (child_pid != -1) {
- /* this is the parent process -- wait until child is done */
- if (-1 == waitpid(child_pid, &child_status, WUNTRACED)) {
-
- HDfprintf(stdout, "can't wait on setup process.\n");
- }
- else if (!WIFEXITED(child_status)) {
+ HDfprintf(stdout, "Constructing test files: \n");
+ HDfflush(stdout);
- HDfprintf(stdout, "setup process hasn't exitied.\n");
- }
- else if (WEXITSTATUS(child_status) != 0) {
+ i = 0;
+ while ((FILENAMES[i] != NULL) && (i < TEST_FILES_TO_CONSTRUCT)) {
+ HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
+ HDfflush(stdout);
+ construct_test_file(i);
- HDfprintf(stdout, "setup process reports failure.\n");
+ if (pass) {
+ HDprintf("done.\n");
+ HDfflush(stdout);
}
else {
-
- HDfprintf(stdout, "testfile construction complete -- proceeding with tests.\n");
+ HDprintf("failed.\n");
+ HDexit(EXIT_FAILURE);
}
+ i++;
}
- else { /* fork failed */
-
- HDfprintf(stdout, "can't create process to construct test file.\n");
- }
+ HDfprintf(stdout, "Test file construction complete.\n");
}
/* can't start test until test files exist */
MPI_Barrier(MPI_COMM_WORLD);
nerrs += verify_cache_image_RO(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
-#if 1
nerrs += verify_cache_image_RO(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += verify_cache_image_RW(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
nerrs += verify_cache_image_RW(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
-#endif
+
finish:
/* make sure all processes are finished before final report, cleanup
@@ -4002,19 +3697,14 @@ finish:
MPI_Barrier(MPI_COMM_WORLD);
if (mpi_rank == 0) { /* only process 0 reports */
- HDsleep(10);
HDprintf("===================================\n");
- if (nerrs > 0) {
+ if (nerrs > 0)
HDprintf("***metadata cache image tests detected %d failures***\n", nerrs);
- }
- else {
+ else
HDprintf("metadata cache image tests finished with no failures\n");
- }
HDprintf("===================================\n");
}
- /* takedown_derived_types(); */
-
/* close HDF5 library */
H5close();
@@ -4025,13 +3715,3 @@ finish:
return (nerrs > 0);
} /* main() */
-#else /* H5_HAVE_FORK */
-
-int
-main(void)
-{
- HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
- return EXIT_SUCCESS;
-} /* end main() */
-
-#endif /* H5_HAVE_FORK */
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index d7b2f0b..bbd4b28 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -1627,9 +1627,6 @@ extend_writeInd(void)
VRFY((mem_dataspace >= 0), "");
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
- /* Temporary turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
file_dataspace = H5Dget_space(dataset2);
@@ -1638,11 +1635,13 @@ extend_writeInd(void)
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dwrite failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
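[Note: H5E_BEGIN_TRY/H5E_END_TRY replace the manual H5Eget_auto2()/H5Eset_auto2() save-and-restore dance used before: the macros silence automatic error reporting only for the enclosed calls and restore it on exit. The general shape, assuming valid dataset and dataspace handles and a buffer buf:

    herr_t ret;

    H5E_BEGIN_TRY
    {
        /* This write is expected to fail; suppress its error stack. */
        ret = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space,
                       H5P_DEFAULT, buf);
    }
    H5E_END_TRY
    VRFY((ret < 0), "H5Dwrite failed as expected");
]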
@@ -1911,20 +1910,19 @@ extend_readInd(void)
VRFY((dataset2 >= 0), "");
/* Try extend dataset1 which is open RDONLY. Should fail. */
- /* first turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
dims[0]++;
- ret = H5Dset_extent(dataset1, dims);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dset_extent failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Read dataset1 using BYROW pattern */
@@ -2209,9 +2207,6 @@ extend_writeAll(void)
}
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
- /* Temporary turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
file_dataspace = H5Dget_space(dataset2);
@@ -2220,11 +2215,13 @@ extend_writeAll(void)
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dwrite failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
@@ -2331,20 +2328,19 @@ extend_readAll(void)
VRFY((dataset2 >= 0), "");
/* Try extend dataset1 which is open RDONLY. Should fail. */
- /* first turn off auto error reporting */
- H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
dims[0]++;
- ret = H5Dset_extent(dataset1, dims);
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
VRFY((ret < 0), "H5Dset_extent failed as expected");
- /* restore auto error reporting */
- H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
/* Read dataset1 using BYROW pattern */
@@ -3321,14 +3317,23 @@ test_actual_io_mode(int selection_mode)
/* Release some resources */
ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dxpl_write);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(dxpl_read);
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret = H5Sclose(mem_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Sclose(file_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
HDfree(buffer);
return;
}
@@ -3344,9 +3349,7 @@ void
actual_io_mode_tests(void)
{
int mpi_size = -1;
- int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank);
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
@@ -3985,9 +3988,9 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
/* test_no_collective_cause_mode (TEST_FILTERS); */
test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
#endif /* LATER */
@@ -4112,13 +4115,15 @@ dataset_atomicity(void)
if (MAINPROCESS) {
fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
VRFY((fid >= 0), "H5Fopen succeeed");
- }
- /* should fail */
- ret = H5Fset_mpi_atomicity(fid, TRUE);
- VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ }
+ H5E_END_TRY
+ VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- if (MAINPROCESS) {
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
}
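[Note: The atomicity hunk fixes a genuine bug: the file was opened only on the main process, yet every rank called H5Fset_mpi_atomicity() on the fid, which is invalid elsewhere. Moving the call inside the MAINPROCESS block keeps the expected-failure check on the rank that actually owns the handle. In outline, mirroring the hunk:

    if (MAINPROCESS) {
        hid_t fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
        VRFY((fid >= 0), "H5Fopen succeeded");

        /* fid is valid only on this rank, so any call that uses it --
         * even one that is expected to fail -- must stay here. */
        H5E_BEGIN_TRY
        {
            ret = H5Fset_mpi_atomicity(fid, TRUE); /* no MPI-IO VFD: fails */
        }
        H5E_END_TRY
        VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed as expected");

        VRFY((H5Fclose(fid) >= 0), "H5Fclose succeeded");
    }
]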
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 37479b3..5153bce 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -255,8 +255,9 @@ test_write_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -418,8 +419,9 @@ test_write_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -578,8 +580,9 @@ test_write_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -746,8 +749,9 @@ test_write_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1202,8 +1206,9 @@ test_write_filtered_dataset_interleaved_write(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1379,8 +1384,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1547,8 +1554,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1714,8 +1723,10 @@ test_write_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1900,8 +1911,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2080,8 +2092,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2279,8 +2292,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2471,8 +2485,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2687,8 +2702,9 @@ test_read_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2890,8 +2906,9 @@ test_read_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3094,8 +3111,9 @@ test_read_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3324,8 +3342,9 @@ test_read_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3906,8 +3925,9 @@ test_read_filtered_dataset_interleaved_read(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4135,8 +4155,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4351,8 +4372,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4574,8 +4596,9 @@ test_read_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4807,8 +4830,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5035,8 +5059,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5265,8 +5290,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5502,8 +5528,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5803,8 +5830,10 @@ test_write_parallel_read_serial(void)
offset[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0],
offset[1], offset[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -5973,8 +6002,9 @@ test_shrinking_growing_chunks(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 8fe5e14..3041e77 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -129,12 +129,6 @@ zero_dim_dset(void)
/*
* Example of using PHDF5 to create ndatasets datasets. Each process write
* a slab of array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_dset_write(void)
@@ -218,12 +212,6 @@ multiple_dset_write(void)
}
/* Example of using PHDF5 to create, write, and read compact dataset.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
compact_dataset(void)
@@ -353,14 +341,6 @@ compact_dataset(void)
/*
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/24/04
*/
void
null_dataset(void)
@@ -465,14 +445,6 @@ null_dataset(void)
* Actual data is _not_ written to these datasets. Dataspaces are exact
* sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
void
big_dataset(void)
@@ -594,16 +566,6 @@ big_dataset(void)
/* Example of using PHDF5 to read a partial written dataset. The dataset does
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * Also added code to free dynamically allocated buffers.
- *
- * JRM - 8/11/04
*/
void
dataset_fillvalue(void)
@@ -710,15 +672,16 @@ dataset_fillvalue(void)
for (i = 0; i < (int)dset_dims[0]; i++)
for (j = 0; j < (int)dset_dims[1]; j++)
for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
if (*trdata != 0)
if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,
- j, k, l, *trdata);
+ HDprintf(
+ "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ mpi_rank, i, j, k, l, *trdata);
if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
+ HDprintf("Rank %d: [more errors ...]\n", mpi_rank);
if (err_num) {
- HDprintf("%d errors found in check_value\n", err_num);
+ HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
nerrors++;
}
}
@@ -856,12 +819,6 @@ collective_group_write_independent_group_read(void)
/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
collective_group_write(void)
@@ -896,6 +853,7 @@ collective_group_write(void)
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate");
H5Pclose(plist);
/* decide the hyperslab according to process number. */
@@ -909,13 +867,13 @@ collective_group_write(void)
ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
VRFY((memspace >= 0), "memspace");
VRFY((filespace >= 0), "filespace");
- VRFY((ret1 >= 0), "mgroup memspace selection");
- VRFY((ret2 >= 0), "mgroup filespace selection");
+ VRFY((ret1 == 0), "mgroup memspace selection");
+ VRFY((ret2 == 0), "mgroup filespace selection");
dcpl = H5Pcreate(H5P_DATASET_CREATE);
ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
VRFY((dcpl >= 0), "dataset creation property");
- VRFY((ret1 >= 0), "set chunk for dataset creation property");
+ VRFY((ret1 == 0), "set chunk for dataset creation property");
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
@@ -932,10 +890,14 @@ collective_group_write(void)
for (j = 0; j < size; j++)
outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ VRFY((ret1 == 0), "H5Dwrite");
- H5Dclose(did);
- H5Gclose(gid);
+ ret1 = H5Dclose(did);
+ VRFY((ret1 == 0), "H5Dclose");
+
+ ret1 = H5Gclose(gid);
+ VRFY((ret1 == 0), "H5Gclose");
#ifdef BARRIER_CHECKS
if (!((m + 1) % 10)) {
@@ -948,7 +910,9 @@ collective_group_write(void)
H5Pclose(dcpl);
H5Sclose(filespace);
H5Sclose(memspace);
- H5Fclose(fid);
+
+ ret1 = H5Fclose(fid);
+ VRFY((ret1 == 0), "H5Fclose");
HDfree(outme);
}
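
The added checks follow the two HDF5 return conventions: object identifiers (hid_t) are valid when non-negative, while the status codes (herr_t) checked here are exactly 0 on success, hence the tightened == 0 comparisons. A minimal sketch of the pattern, assuming the VRFY macro from the parallel test scaffolding; create_and_close is hypothetical:

    #include "testpar.h" /* VRFY and the parallel test scaffolding */

    static void
    create_and_close(const char *filename)
    {
        hid_t  fid;
        herr_t ret;

        /* Identifiers (hid_t): any non-negative value is a valid handle */
        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((fid >= 0), "H5Fcreate");

        /* Status codes (herr_t): exactly 0 on success, negative on failure */
        ret = H5Fclose(fid);
        VRFY((ret == 0), "H5Fclose");
    }
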
@@ -964,6 +928,7 @@ independent_group_read(void)
const H5Ptest_param_t *pt;
char * filename;
int ngroups;
+ herr_t ret;
pt = GetTestParameters();
filename = pt->name;
@@ -975,6 +940,7 @@ independent_group_read(void)
H5Pset_all_coll_metadata_ops(plist, FALSE);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((fid >= 0), "H5Fopen");
H5Pclose(plist);
/* open groups and read datasets. Odd number processes read even number
@@ -989,20 +955,11 @@ independent_group_read(void)
group_dataset_read(fid, mpi_rank, m);
}
- H5Fclose(fid);
+ ret = H5Fclose(fid);
+ VRFY((ret == 0), "H5Fclose");
}
/* Open and read datasets and compare data
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memeory
- * when we are done with it.
- *
- * JRM - 8/16/04
*/
static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
@@ -1035,16 +992,17 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
/* this is the original value */
for (i = 0; i < size; i++)
- for (j = 0; j < size; j++) {
+ for (j = 0; j < size; j++)
outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- }
/* compare the original value(outdata) to the value in file(indata).*/
ret = check_value(indata, outdata, size);
VRFY((ret == 0), "check the data");
- H5Dclose(did);
- H5Gclose(gid);
+ ret = H5Dclose(did);
+ VRFY((ret == 0), "H5Dclose");
+ ret = H5Gclose(gid);
+ VRFY((ret == 0), "H5Gclose");
HDfree(indata);
HDfree(outdata);
@@ -1076,11 +1034,6 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
* + means the group has attribute(s).
* ' means the datasets in the groups have attribute(s).
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
multiple_group_write(void)
@@ -1164,12 +1117,6 @@ multiple_group_write(void)
/*
* In a group, creates NDATASETS datasets. Each process writes a hyperslab
* of a data array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1243,12 +1190,6 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
/*
* This function is to verify the data from multiple group testing. It opens
* every dataset in every group and check their correctness.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_group_read(void)
@@ -1323,12 +1264,6 @@ multiple_group_read(void)
/*
* This function opens all the datasets in a certain, checks the data using
* dataset_vrfy function.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1456,10 +1391,8 @@ read_attribute(hid_t obj_id, int this_type, int num)
if (this_type == is_group) {
HDsprintf(attr_name, "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- if (MAINPROCESS) {
- H5Aread(aid, H5T_NATIVE_INT, &in_num);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
- }
+ H5Aread(aid, H5T_NATIVE_INT, &in_num);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
H5Aclose(aid);
}
else if (this_type == is_dset) {
@@ -1467,10 +1400,8 @@ read_attribute(hid_t obj_id, int this_type, int num)
for (i = 0; i < 8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- if (MAINPROCESS) {
- H5Aread(aid, H5T_NATIVE_INT, in_data);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
- }
+ H5Aread(aid, H5T_NATIVE_INT, in_data);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
H5Aclose(aid);
}
@@ -1479,12 +1410,6 @@ read_attribute(hid_t obj_id, int this_type, int num)
/* This functions compares the original data with the read-in data for its
* hyperslab part only by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
@@ -1517,12 +1442,6 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
}
/* Decide the portion of data chunk in dataset by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
static void
@@ -1564,8 +1483,6 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t
* This function reproduces this situation. At present the test hangs
* on failure.
* JRM - 9/13/04
- *
- * Changes: None.
*/
#define N 4
@@ -1809,10 +1726,6 @@ io_mode_confusion(void)
* cache clients will have to construct on disk images on demand.
*
* JRM -- 10/13/10
- *
- * Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
*/
#define NUM_DATA_SETS 4
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index cf91b31..17091cb 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -35,7 +35,7 @@ const char *FILENAMES[NFILENAME + 1] = {"reloc_t_pread_data_file", "reloc_t_prea
#define LIMIT_NPROC 6
-hbool_t pass = true;
+hbool_t pass = TRUE;
static const char *random_hdf5_text = "Now is the time for all first-time-users of HDF5 to read their \
manual or go thru the tutorials!\n\
While you\'re at it, now is also the time to read up on MPI-IO.";
@@ -112,7 +112,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
float nextValue;
float * data_slice = NULL;
- pass = true;
+ pass = TRUE;
HDassert(comm != MPI_COMM_NULL);
@@ -256,14 +256,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
if ((dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dcreate2() failed.\n";
}
}
if (pass) {
if ((H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dwrite() failed.\n";
}
}
@@ -273,14 +273,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pcreate() failed.\n";
}
}
if (pass) {
if ((H5Pset_chunk(dcpl_id, 1, chunk)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pset_chunk() failed.\n";
}
}
@@ -289,27 +289,27 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if ((dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT,
dcpl_id, H5P_DEFAULT)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dcreate2() failed.\n";
}
}
if (pass) {
if ((H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dwrite() failed.\n";
}
}
if (pass || (dcpl_id != -1)) {
if (H5Pclose(dcpl_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(dcpl_id) failed.\n";
}
}
if (pass || (dset_id_ch != -1)) {
if (H5Dclose(dset_id_ch) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
}
}
@@ -317,49 +317,49 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* close file, etc. */
if (pass || (dset_id != -1)) {
if (H5Dclose(dset_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
if (pass || (file_id != -1)) {
if (H5Fclose(file_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
if (pass || (dxpl_id != -1)) {
if (H5Pclose(dxpl_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(dxpl_id) failed.\n";
}
}
if (pass || (fapl_id != -1)) {
if (H5Pclose(fapl_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
if (pass || (fctmpl != -1)) {
if (H5Pclose(fctmpl) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(fctmpl) failed.\n";
}
}
@@ -661,14 +661,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
@@ -859,7 +859,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
@@ -956,7 +956,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
@@ -969,7 +969,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (dxpl_id != -1)) {
if (H5Pclose(dxpl_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(dxpl_id) failed.\n";
}
}
@@ -978,28 +978,28 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* close file, etc. */
if (pass || (dset_id != -1)) {
if (H5Dclose(dset_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
if (pass || (dset_id_ch != -1)) {
if (H5Dclose(dset_id_ch) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
}
}
if (pass || (file_id != -1)) {
if (H5Fclose(file_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
if (pass || (fapl_id != -1)) {
if (H5Pclose(fapl_id) < 0) {
- pass = false;
+ pass = FALSE;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
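
t_pread.c tracks its result in an hbool_t, so the C99 true/false literals are normalized to the TRUE/FALSE macros the HDF5 test headers supply for that type. A minimal sketch of the sticky-failure idiom the file relies on; mark_failure is a hypothetical helper:

    #include "testpar.h" /* hbool_t plus the TRUE/FALSE macros */

    static hbool_t     pass         = TRUE; /* cleared on first failure, never reset mid-test */
    static const char *failure_mssg = NULL;

    /* Hypothetical helper: record a failure without aborting, so the
     * cleanup code further down can still release open handles. */
    static void
    mark_failure(const char *mssg)
    {
        pass         = FALSE;
        failure_mssg = mssg;
    }
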
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 27de777..92c8bc0 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -441,3 +441,192 @@ test_plist_ed(void)
ret = H5Pclose(acpl);
VRFY((ret >= 0), "H5Pclose succeeded");
}
+
+void
+external_links(void)
+{
+ hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */
+ hid_t lapl = H5I_INVALID_HID; /* link access prop. list */
+ hid_t fapl = H5I_INVALID_HID; /* file access prop. list */
+ hid_t gapl = H5I_INVALID_HID; /* group access prop. list */
+ hid_t fid = H5I_INVALID_HID; /* file id */
+ hid_t group = H5I_INVALID_HID; /* group id */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm;
+ int doIO;
+ int i, mrc;
+
+ herr_t ret; /* Generic return value */
+ htri_t tri_status; /* tri return value */
+
+ const char *filename = "HDF5test.h5";
+ const char *filename_ext = "HDF5test_ext.h5";
+ const char *group_path = "/Base/Block/Step";
+ const char *link_name = "link"; /* external link */
+ char link_path[50];
+
+ if (VERBOSE_MED)
+ HDprintf("Check external links\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Check that MPI communicator access properties are passed
+ through to externally linked files */
+
+ if (mpi_rank == 0) {
+
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_create_intermediate_group(lcpl, 1);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ /* Create file to serve as target for external link.*/
+ fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Create a new file using the file access property list. */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ /* Create external links to the target files. */
+ ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Lcreate_external succeeded");
+
+ /* Close and release resources. */
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * For the first case, use all processes. For the second case,
+ * use a sub-communicator to verify that the correct communicator
+ * is passed to the externally linked files.
+ * Note that there is no way to determine whether the MPI info
+ * object is used for the externally linked files.
+ */
+
+ for (i = 0; i < 2; i++) {
+
+ if (i == 0) {
+ doIO = 1;
+ comm = MPI_COMM_WORLD;
+ }
+ else {
+ doIO = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split succeeded");
+ }
+
+ if (doIO) {
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
+
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Test opening a group that is the target of an external link; the
+ externally linked file should inherit the source file's access properties */
+ HDsprintf(link_path, "%s%s%s", group_path, "/", link_name);
+ group = H5Gopen2(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gopen succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ /* Test opening a group that is an external link by setting the
+ group access property list */
+ gapl = H5Pcreate(H5P_GROUP_ACCESS);
+ VRFY((gapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(gapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ group = H5Gopen2(fid, link_path, gapl);
+ VRFY((group >= 0), "H5Gopen succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Pclose(gapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* test link APIs */
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ tri_status = H5Lexists(fid, link_path, lapl);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ group = H5Oopen(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ group = H5Oopen(fid, link_path, lapl);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close the remaining resources */
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ if (i == 1) {
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+ }
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* delete the test files */
+ if (mpi_rank == 0) {
+ MPI_File_delete(filename, MPI_INFO_NULL);
+ MPI_File_delete(filename_ext, MPI_INFO_NULL);
+ }
+}
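
The heart of the new test is H5Pset_elink_fapl, which attaches a file access property list to a link or group access property list so that the file behind an external link is opened with those properties (here, the MPI-IO driver and a specific communicator) rather than the defaults. A condensed sketch of the traversal setup; open_through_link is hypothetical and assumes an already-open fid and a valid comm:

    static void
    open_through_link(hid_t fid, MPI_Comm comm)
    {
        hid_t  fapl, lapl, obj;
        herr_t ret;

        fapl = H5Pcreate(H5P_FILE_ACCESS);
        ret  = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL); /* MPI-IO driver for target files */
        VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");

        lapl = H5Pcreate(H5P_LINK_ACCESS);
        ret  = H5Pset_elink_fapl(lapl, fapl); /* external-link targets open with this FAPL */
        VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");

        /* Traversal now opens the target file with the communicator above */
        obj = H5Oopen(fid, "/Base/Block/Step/link", lapl);
        VRFY((obj >= 0), "H5Oopen succeeded");

        H5Oclose(obj);
        H5Pclose(lapl);
        H5Pclose(fapl);
    }
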
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 6ad6694..2dd867a 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -441,11 +441,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
/* sync with the other processes before checking data */
- if (!use_collective_io) {
-
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
- }
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
/* read the small data set back to verify that it contains the
* expected data. Note that each process reads in the entire
@@ -515,11 +512,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
/* sync with the other processes before checking data */
- if (!use_collective_io) {
-
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
- }
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
/* read the large data set back to verify that it contains the
* expected data. Note that each process reads in the entire
@@ -547,12 +541,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
VRFY((mis_match == FALSE), "large ds init data good.");
/* sync with the other processes before changing data */
-
- if (!use_collective_io) {
-
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
- }
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
return;
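
The barriers above become unconditional because each rank writes only its own slab yet reads the entire dataset back for verification, so all writes must complete before any rank reads, whether the I/O was collective or independent. A hypothetical fragment of the idiom, with generic names standing in for the test's real handles:

    /* Hypothetical fragment: dset, mem_space, file_space, xfer_plist, and
     * buf stand in for the test's real handles and buffers. */
    ret = H5Dwrite(dset, H5T_NATIVE_UINT32, mem_space, file_space, xfer_plist, buf);
    VRFY((ret >= 0), "H5Dwrite() initial write succeeded");

    /* Sync with the other processes before checking data: every rank reads
     * back the entire dataset, including slabs written by other ranks. */
    mrc = MPI_Barrier(MPI_COMM_WORLD);
    VRFY((mrc == MPI_SUCCESS), "Sync after dataset writes");
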
@@ -2000,8 +1990,8 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", tests_skipped,
- total_tests);
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
}
return;
@@ -3913,16 +3903,16 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
- total_tests);
+ HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n",
+ tests_run, tests_skipped, total_tests);
}
#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
}
}
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", tests_skipped,
- total_tests);
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
}
return;
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index cce8aff..0beb6d7 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -33,9 +33,6 @@ int ngroups = 512; /* number of groups to create in root
int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void * old_client_data; /* previous error handler arg.*/
-
/* other option flags */
/* FILENAME and filenames must have the same number of names.
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 56b966d..e3075a3 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -225,16 +225,15 @@ typedef enum {
} ShapeSameTestMethods;
/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void * old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
-extern int dxfer_coll_type;
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern int facc_type; /*Test file access type */
+extern int dxfer_coll_type;
/* Test program prototypes */
void test_plist_ed(void);
+void external_links(void);
void zero_dim_dset(void);
void test_file_properties(void);
void test_delete(void);
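
Declaring the prototype makes external_links visible to testphdf5.c, where the parallel tests are registered with the framework's AddTest helper; the registration hunk itself is not part of this diff, so the call below is only a sketch of the usual form:

    /* Sketch of registering the new test with the parallel test framework;
     * the name, description, and parameters argument are illustrative. */
    AddTest("extlink", external_links, NULL, "test external links", NULL);
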