author    Allen Byrne <50328838+byrnHDF@users.noreply.github.com>    2021-04-30 16:07:50 (GMT)
committer GitHub <noreply@github.com>    2021-04-30 16:07:50 (GMT)
commit    27198d814a58640a6088f44b72d43d3bd4bb6629 (patch)
tree      81ea462c0cd993553c7f3820d8ab288e9bea8ed5 /testpar
parent    4155e595e2ab16d81de79ed19d7de4fab5eb3302 (diff)
Hdf5 1 10 merge of printf specifiers and VS2015 min changes (#612)
* HDFFV-10865 - merge from dev, HDFArray perf fix.
* Remove duplicate setting
* Whitespace changes after clang format
* Undo version 11 clang format changes
* Merge CMake changes from develop
* test testing script merge from develop
* Update supported platforms
* PR#3 merge from develop
* Merge gcc 10 diagnostics option from develop
* Merge #318 OSX changes from develop
* Merge small changes from develop
* Minor non-space formatting changes
* #386 copyright corrections for java folder
* Merges from develop #358 patches from vtk #361 fix header guard spelling
* Merge updates #358 patches from vtk #361 fix header guard spelling
* format fix
* Fix missing underscore and make H5public.h closer to dev
* Merges from develop #340 clang -Wformat-security warnings #360 Fixed uninitialized warnings header guard underscore cleanup JNI cleanup
* format alignment
* Add missing test ref file
* Merge #380 from develop
* Finish java merges from develop
* Fix java issues with tests and javadoc
* Correct use of attribute access plist
* Remove debug code
* Remove unused variable
* Change file access to read only for java tests
* Split clang format operations.
* More javadoc comments
* Remove pre-split setting
* format source
* Change windows TS to use older VS.
* Mostly all javadoc fixes, one argument rename.
* synch file
* Merge of long double fix and compiler flags
* HDFFV-11229 merge changes from develop
* HDFFV-11229 correct test script
* HDFFV-11229 update autotools test script for two ref files
* HDFFV-11229 merge dev changes for long double display in tools
* Committing clang-format changes
* minor whitespace
* remove unneeded macro
* Committing clang-format changes
* Add "option" command for clang options
* Rework CMake add_custom to use the BYPRODUCTS argument
  Update pkgconfig scripts for parallel builds.
  Fix install COPYING file reference.
  Remove unused round defines.
  Change CMake default setting of BUILD_CPP to off.
* Fortran target depends
* Remove incorrect source attribute
* Revert define removal
* printf specifiers and VS2015 min changes
* Committing clang-format changes
* Add time struct

Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_bigio.c              53
-rw-r--r--  testpar/t_cache.c              98
-rw-r--r--  testpar/t_cache_image.c         8
-rw-r--r--  testpar/t_chunk_alloc.c        10
-rw-r--r--  testpar/t_coll_chunk.c         21
-rw-r--r--  testpar/t_coll_md_read.c       16
-rw-r--r--  testpar/t_dset.c              219
-rw-r--r--  testpar/t_file.c               78
-rw-r--r--  testpar/t_filter_read.c         8
-rw-r--r--  testpar/t_filters_parallel.c  134
-rw-r--r--  testpar/t_mdset.c             132
-rw-r--r--  testpar/t_mpi.c                 2
-rw-r--r--  testpar/t_prestart.c           15
-rw-r--r--  testpar/t_prop.c                2
-rw-r--r--  testpar/t_pshutdown.c           8
-rw-r--r--  testpar/t_shapesame.c          63
-rw-r--r--  testpar/t_span_tree.c          56
17 files changed, 392 insertions, 531 deletions
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 1a8976c..2fbe1ac 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -131,12 +131,13 @@ point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], s
}
if (VERBOSE_MED) {
- HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
- "datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "total datapoints=%" PRIuHSIZE "\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+ block[0] * block[1] * count[0] * count[1]);
k = 0;
for (i = 0; i < num_points; i++) {
HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
@@ -157,15 +158,15 @@ dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
/* print the column heading */
HDprintf("%-8s", "Cols:");
for (j = 0; j < block[1]; j++) {
- HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ HDprintf("%3" PRIuHSIZE " ", start[1] + j);
}
HDprintf("\n");
/* print the slab data */
for (i = 0; i < block[0]; i++) {
- HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
for (j = 0; j < block[1]; j++) {
- HDprintf("%llu ", *dataptr++);
+ HDprintf("%" PRIuHSIZE " ", *dataptr++);
}
HDprintf("\n");
}
@@ -184,10 +185,11 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
HDprintf("verify_data dumping:::\n");
- HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
- (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
- (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
HDprintf("original values:\n");
dataset_print(start, block, original);
HDprintf("compared values:\n");
@@ -199,9 +201,10 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
for (j = 0; j < block[1]; j++) {
if (*dataset != *original) {
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
- (unsigned long)(j + start[1]), *(original), *(dataset));
+ HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+ "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+ "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+ i, j, i + start[0], j + start[1], *(original), *(dataset));
}
dataset++;
original++;
@@ -1163,8 +1166,6 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1269,8 +1270,6 @@ coll_chunk2(void)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1325,17 +1324,9 @@ coll_chunk3(void)
*
* Failure: -1
*
- * Modifications:
- * Remove invalid temporary property checkings for API_LINK_HARD and
- * API_LINK_TRUE cases.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
- *
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1842,9 +1833,8 @@ main(int argc, char **argv)
* that we try to ensure that our bigio handling is actually
* envoked and tested.
*/
- if (newsize != oldsize) {
+ if (newsize != oldsize)
bigcount = newsize * 2;
- }
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
@@ -1855,9 +1845,8 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("Failed to turn off atexit processing. Continue.\n");
- };
/* set alarm. */
ALARM_ON;
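The two main() hunks above only drop braces around single-statement ifs, but the call order they preserve matters: H5dont_atexit() has to run before any other HDF5 call, so the library never installs an atexit handler that might issue MPI calls after MPI has shut down. A sketch of that startup sequence, with the test body elided:

    #include <stdio.h>
    #include <mpi.h>
    #include <hdf5.h>

    int
    main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* Must precede all other HDF5 calls; failure is non-fatal, so
         * the tests warn and continue rather than abort. */
        if (H5dont_atexit() < 0)
            printf("Failed to turn off atexit processing. Continue.\n");

        /* ... run the parallel tests ... */

        MPI_Finalize();
        return 0;
    }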
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index d084f5f..94d09e6 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -1251,7 +1251,7 @@ reset_server_counters(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%ld).\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
actual_total_reads, total_reads);
}
}
@@ -1261,7 +1261,7 @@ reset_server_counters(void)
success = FALSE;
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%ld).\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
actual_total_writes, total_writes);
}
}
@@ -1465,7 +1465,8 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else if (data[target_index].len != mssg_ptr->len) {
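%a was an HDfprintf-only extension for printing haddr_t file addresses; its replacement PRIuHADDR (and PRIxHADDR for hex, used further down) are ordinary format macros from H5public.h that work with any printf. A minimal sketch:

    #include <stdio.h>
    #include <hdf5.h> /* haddr_t, PRIuHADDR, PRIxHADDR */

    int
    main(void)
    {
        haddr_t target_addr = (haddr_t)4096;

        /* Decimal and hexadecimal forms of a file address. */
        printf("addr lookup failed for %" PRIuHADDR ".\n", target_addr);
        printf("entry 0x%" PRIxHADDR "\n", target_addr);
        return 0;
    }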
@@ -1473,7 +1474,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
data[target_index].len, mssg_ptr->len);
}
}
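%Zu was likewise an HDfprintf extension; C99's %zu matches size_t directly, so the conversion works with the standard printf family:

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t len = sizeof(int) * 200;

        /* %zu is the standard conversion for size_t; no cast required. */
        printf("data[i].len = %zu\n", len);
        return 0;
    }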
@@ -1482,7 +1483,9 @@ serve_read_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: proc %d read invalid entry. idx/base_addr = %d/%a.\n",
+ HDfprintf(stdout,
+ "%d:%s: proc %d read invalid entry. "
+ "idx/base_addr = %d/%" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg_ptr->src, target_index, data[target_index].base_addr);
}
}
@@ -1651,7 +1654,8 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else if (data[target_index].len != mssg_ptr->len) {
@@ -1659,7 +1663,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
data[target_index].len, mssg_ptr->len);
}
}
@@ -1788,11 +1792,11 @@ serve_total_writes_request(struct mssg_t *mssg_ptr)
if (success) {
- HDfprintf(stdout, "%d request total writes %ld.\n", (int)(mssg_ptr->src), total_writes);
+ HDfprintf(stdout, "%d request total writes %d.\n", (int)(mssg_ptr->src), total_writes);
}
else {
- HDfprintf(stdout, "%d request total writes %ld -- FAILED.\n", (int)(mssg_ptr->src), total_writes);
+ HDfprintf(stdout, "%d request total writes %d -- FAILED.\n", (int)(mssg_ptr->src), total_writes);
}
}
@@ -1858,11 +1862,11 @@ serve_total_reads_request(struct mssg_t *mssg_ptr)
if (success) {
- HDfprintf(stdout, "%d request total reads %ld.\n", (int)(mssg_ptr->src), total_reads);
+ HDfprintf(stdout, "%d request total reads %d.\n", (int)(mssg_ptr->src), total_reads);
}
else {
- HDfprintf(stdout, "%d request total reads %ld -- FAILED.\n", (int)(mssg_ptr->src), total_reads);
+ HDfprintf(stdout, "%d request total reads %d -- FAILED.\n", (int)(mssg_ptr->src), total_reads);
}
}
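These hunks fix mismatches in the other direction: total_reads and total_writes are plain ints, so printing them through %ld read the variadic argument at the wrong width. The rule the whole commit applies is that each conversion must match the (promoted) argument type exactly. A small illustration, with the types inferred from the fixes:

    #include <stdio.h>

    int
    main(void)
    {
        int  total_writes       = 42;  /* int  -> %d  */
        long actual_total_reads = 42L; /* long -> %ld */

        /* gcc/clang -Wformat flags any pairing that does not match. */
        printf("request total writes %d.\n", total_writes);
        printf("actual/total reads (%ld/%d).\n", actual_total_reads, total_writes);
        return 0;
    }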
@@ -1917,7 +1921,8 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else {
@@ -2005,7 +2010,8 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", world_mpi_rank, FUNC, target_addr);
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
}
else {
@@ -2489,10 +2495,10 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
+ HDfprintf(stdout, "%d:%s: mssg.base_addr = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg.base_addr);
HDfprintf(stdout,
- "%d:%s: entry_ptr->base_addr = %a.\n",
+ "%d:%s: entry_ptr->base_addr = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC,
entry_ptr->base_addr);
}
@@ -2502,7 +2508,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d:%s: mssg.len != entry_ptr->len.\n",
world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
+ HDfprintf(stdout, "%d:%s: mssg.len = %" PRIuHADDR ".\n",
world_mpi_rank, FUNC, mssg.len);
}
@@ -4297,8 +4303,8 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n", world_mpi_rank,
- FUNC, (long long)addr, reported_entry_reads, expected_entry_reads);
+ HDfprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n",
+ world_mpi_rank, FUNC, addr, reported_entry_reads, expected_entry_reads);
}
}
}
@@ -4394,7 +4400,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%ld/%ld).\n", world_mpi_rank,
+ HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n", world_mpi_rank,
FUNC, (long long)addr, reported_entry_writes, expected_entry_writes);
}
}
@@ -4486,8 +4492,8 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if (verbose) {
- HDfprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC, reported_total_reads, expected_total_reads);
+ HDfprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%d).\n", world_mpi_rank,
+ FUNC, reported_total_reads, expected_total_reads);
}
}
}
@@ -6514,15 +6520,15 @@ trace_file_check(int metadata_write_strategy)
HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
FUNC, i);
if (expected_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
"<EMPTY>", expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer,
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
actual_line_len);
}
if (actual_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
(*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC,
"<EMPTY>", actual_line_len);
}
}
@@ -6538,9 +6544,9 @@ trace_file_check(int metadata_write_strategy)
if (verbose) {
HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC,
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
(*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer,
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
actual_line_len);
}
}
@@ -6868,9 +6874,9 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
- };
+
H5open();
express_test = do_express_test();
@@ -6887,9 +6893,8 @@ main(int argc, char **argv)
}
#ifdef H5_HAVE_MPE
- if (MAINPROCESS) {
+ if (MAINPROCESS)
HDprintf(" Tests compiled for MPE.\n");
- }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
@@ -6902,11 +6907,8 @@ main(int argc, char **argv)
}
if (mpi_size < 3) {
-
- if (MAINPROCESS) {
-
+ if (MAINPROCESS)
HDprintf(" Need at least 3 processes. Exiting.\n");
- }
goto finish;
}
@@ -6924,17 +6926,14 @@ main(int argc, char **argv)
/* setup file access property list with the world communicator */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, FUNC);
- }
}
if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, FUNC);
- }
}
/* fix the file names */
@@ -6942,9 +6941,8 @@ main(int argc, char **argv)
if (h5_fixname(FILENAME[u], fapl, filenames[u], sizeof(filenames[u])) == NULL) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, FUNC);
- }
break;
}
}
@@ -6952,9 +6950,8 @@ main(int argc, char **argv)
/* close the fapl before we set it up again */
if (H5Pclose(fapl) < 0) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, FUNC);
- }
}
/* now create the fapl again, excluding the server process. */
@@ -6963,32 +6960,25 @@ main(int argc, char **argv)
/* setup file access property list */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, FUNC);
- }
}
if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, FUNC);
- }
}
}
setup_rand();
max_nerrors = get_max_nerrors();
-
if (max_nerrors != 0) {
/* errors in setup -- no point in continuing */
-
- if (world_mpi_rank == 0) {
-
+ if (world_mpi_rank == 0)
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
- }
goto finish;
}
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index ee411bd..fcbe83b 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -3916,9 +3916,8 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
- };
H5open();
@@ -3930,11 +3929,8 @@ main(int argc, char **argv)
}
if (mpi_size < 2) {
-
- if (mpi_rank == 0) {
-
+ if (mpi_rank == 0)
HDprintf(" Need at least 2 processes. Exiting.\n");
- }
goto finish;
}
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index b530d5f..865e1f8 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -85,7 +85,7 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
/* Only MAINPROCESS should create the file. Others just wait. */
if (MAINPROCESS) {
nchunks = chunk_factor * mpi_size;
- dims[0] = nchunks * CHUNK_SIZE;
+ dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
/* Create the data space with unlimited dimensions. */
dataspace = H5Screate_simple(1, dims, maxdims);
VRFY((dataspace >= 0), "");
@@ -118,7 +118,7 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
count[0] = 1;
stride[0] = 1;
block[0] = chunk_dims[0];
- offset[0] = (nchunks - 2) * chunk_dims[0];
+ offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
@@ -223,7 +223,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
- size[0] = nchunks * CHUNK_SIZE;
+ size[0] = (hsize_t)nchunks * CHUNK_SIZE;
switch (action) {
@@ -235,7 +235,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
stride[0] = 1;
block[0] = chunk_dims[0];
for (i = 0; i < nchunks / mpi_size; i++) {
- offset[0] = (i * mpi_size + mpi_rank) * chunk_dims[0];
+ offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
@@ -364,7 +364,7 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
/* reset buffer values */
HDmemset(buffer, -1, CHUNK_SIZE);
- offset[0] = i * chunk_dims[0];
+ offset[0] = (hsize_t)i * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
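The t_chunk_alloc changes are all integer-conversion fixes: offsets are hsize_t (unsigned 64-bit) while loop counters and MPI ranks are int, so the int expression is cast before the multiply and the arithmetic happens at full width. A sketch of the pattern; the helper name is illustrative:

    #include <stdio.h>
    #include <hdf5.h>

    /* Cast the int expression to hsize_t first so the multiplication is
     * done in unsigned 64-bit arithmetic rather than in int. */
    static hsize_t
    chunk_offset(int i, int mpi_size, int mpi_rank, hsize_t chunk_dim)
    {
        return (hsize_t)(i * mpi_size + mpi_rank) * chunk_dim;
    }

    int
    main(void)
    {
        printf("offset = %" PRIuHSIZE "\n", chunk_offset(3, 4, 1, 100));
        return 0;
    }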
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index fc117e3..651a392 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -609,7 +609,6 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
- int i;
/* set up MPI parameters */
MPI_Comm_size(comm, &mpi_size);
@@ -627,7 +626,7 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
VRFY((status >= 0), "");
/* setup dimensionality object */
- dims[0] = SPACE_DIM1 * mpi_size;
+ dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size);
dims[1] = SPACE_DIM2;
/* allocate memory for data buffer */
@@ -660,7 +659,7 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
VRFY((crp_plist >= 0), "");
/* Set up chunk information. */
- chunk_dims[0] = dims[0] / chunk_factor;
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
(chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2);
@@ -1044,7 +1043,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
stride[1] = 1;
count[0] = SPACE_DIM1;
count[1] = SPACE_DIM2;
- start[0] = mpi_rank * count[0];
+ start[0] = (hsize_t)mpi_rank * count[0];
start[1] = 0;
break;
@@ -1057,7 +1056,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
stride[1] = 3;
count[0] = SPACE_DIM1 / (stride[0] * block[0]);
count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
- start[0] = SPACE_DIM1 * mpi_rank;
+ start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank;
start[1] = 0;
break;
@@ -1071,7 +1070,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
stride[1] = 1;
count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1);
count[1] = SPACE_DIM2;
- start[0] = mpi_rank * count[0];
+ start[0] = (hsize_t)mpi_rank * count[0];
start[1] = 0;
break;
@@ -1083,15 +1082,15 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
block[0] = 1;
count[0] = 2;
- stride[0] = SPACE_DIM1 * mpi_size / 4 + 1;
+ stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1;
block[1] = SPACE_DIM2;
count[1] = 1;
start[1] = 0;
stride[1] = 1;
if ((mpi_rank * 3) < (mpi_size * 2))
- start[0] = mpi_rank;
+ start[0] = (hsize_t)mpi_rank;
else
- start[0] = 1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3);
+ start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3));
break;
case BYROW_SELECTINCHUNK:
@@ -1099,7 +1098,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
block[0] = 1;
count[0] = 1;
- start[0] = mpi_rank * SPACE_DIM1;
+ start[0] = (hsize_t)(mpi_rank * SPACE_DIM1);
stride[0] = 1;
block[1] = SPACE_DIM2;
count[1] = 1;
@@ -1110,7 +1109,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t
default:
/* Unknown mode. Set it to cover the whole dataset. */
- block[0] = SPACE_DIM1 * mpi_size;
+ block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size;
block[1] = SPACE_DIM2;
stride[0] = block[0];
stride[1] = block[1];
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index 6f148ea..951882d 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -99,8 +99,8 @@ test_partial_no_selection_coll_md_read(void)
dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
VRFY((dataset_dims != NULL), "malloc succeeded");
- dataset_dims[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_size;
- dataset_dims[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE * mpi_size;
+ dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
max_dataset_dims[0] = H5S_UNLIMITED;
max_dataset_dims[1] = H5S_UNLIMITED;
@@ -125,12 +125,12 @@ test_partial_no_selection_coll_md_read(void)
*
* The ranks will write rows across the dataset.
*/
- start[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_rank;
+ start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
start[1] = 0;
stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
count[0] = 1;
- count[1] = mpi_size;
+ count[1] = (hsize_t)mpi_size;
block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
@@ -416,8 +416,8 @@ test_link_chunk_io_sort_chunk_issue(void)
dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
VRFY((dataset_dims != NULL), "malloc succeeded");
- dataset_dims[0] =
- LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * mpi_size * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
+ dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size *
+ (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
max_dataset_dims[0] = H5S_UNLIMITED;
fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
@@ -442,8 +442,8 @@ test_link_chunk_io_sort_chunk_issue(void)
* The ranks will write rows across the dataset.
*/
stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
- count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / mpi_size;
- start[0] = count[0] * mpi_rank;
+ count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
+ start[0] = count[0] * (hsize_t)mpi_rank;
block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index dbc466f..400dc36 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -48,61 +48,61 @@ slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t s
switch (mode) {
case BYROW:
/* Each process takes a slabs of rows. */
- block[0] = dim0 / mpi_size;
- block[1] = dim1;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)dim1;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank * block[0];
+ start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
if (VERBOSE_MED)
HDprintf("slab_set BYROW\n");
break;
case BYCOL:
/* Each process takes a block of columns. */
- block[0] = dim0;
- block[1] = dim1 / mpi_size;
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(dim1 / mpi_size);
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = mpi_rank * block[1];
+ start[1] = (hsize_t)mpi_rank * block[1];
if (VERBOSE_MED)
HDprintf("slab_set BYCOL\n");
break;
case ZROW:
/* Similar to BYROW except process 0 gets 0 row */
- block[0] = (mpi_rank ? dim0 / mpi_size : 0);
- block[1] = dim1;
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = (mpi_rank ? mpi_rank * block[0] : 0);
+ start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
start[1] = 0;
if (VERBOSE_MED)
HDprintf("slab_set ZROW\n");
break;
case ZCOL:
/* Similar to BYCOL except process 0 gets 0 column */
- block[0] = dim0;
- block[1] = (mpi_rank ? dim1 / mpi_size : 0);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
stride[0] = block[0];
- stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = (mpi_rank ? mpi_rank * block[1] : 0);
+ start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
if (VERBOSE_MED)
HDprintf("slab_set ZCOL\n");
break;
default:
/* Unknown mode. Set it to cover the whole dataset. */
HDprintf("unknown slab_set mode (%d)\n", mode);
- block[0] = dim0;
- block[1] = dim1;
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
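Beyond the casts, the ZROW/ZCOL cases carry a subtlety worth noting: rank 0 selects an empty block, but its stride is forced to 1 because H5Sselect_hyperslab rejects a stride of 0. A sketch of the ZROW branch as a standalone helper:

    #include <hdf5.h>

    /* ZROW-style slab: rank 0 gets zero rows, every other rank gets a
     * block of dim0/mpi_size rows.  Casts keep int -> hsize_t explicit. */
    static void
    zrow_set(int mpi_rank, int mpi_size, int dim0, int dim1,
             hsize_t start[2], hsize_t count[2], hsize_t stride[2], hsize_t block[2])
    {
        block[0]  = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
        block[1]  = (hsize_t)dim1;
        stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
        stride[1] = block[1];
        count[0]  = 1;
        count[1]  = 1;
        start[0]  = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
        start[1]  = 0;
    }

    int
    main(void)
    {
        hsize_t start[2], count[2], stride[2], block[2];

        zrow_set(1, 4, 16, 8, start, count, stride, block);
        return (int)start[0] == 4 ? 0 : 1;
    }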
@@ -303,7 +303,7 @@ dataset_writeInd(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* ----------------------------------------
@@ -326,8 +326,8 @@ dataset_writeInd(void)
* and the slabs local to the MPI process.
* ------------------------------------------- */
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -441,9 +441,9 @@ dataset_readInd(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* setup file access template */
@@ -555,7 +555,6 @@ dataset_writeAll(void)
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
- int i;
herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
@@ -572,12 +571,12 @@ dataset_writeAll(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
- num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t));
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -600,8 +599,8 @@ dataset_writeAll(void)
* and create the dataset
* ------------------------- */
/* setup 2-D dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -889,16 +888,16 @@ dataset_writeAll(void)
if (data_array1)
free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
block[0] = 1;
- block[1] = dim1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
- stride[1] = dim1;
+ stride[1] = (hsize_t)dim1;
count[0] = 1;
count[1] = 1;
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
dataset_fill(start, block, data_array1);
@@ -944,7 +943,7 @@ dataset_writeAll(void)
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
file_dataspace = H5Dget_space(dataset6);
@@ -981,7 +980,7 @@ dataset_writeAll(void)
/* Dataset7: point selection in File - All selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
point_set(start, count, stride, block, num_points, coords, IN_ORDER);
file_dataspace = H5Dget_space(dataset7);
@@ -1071,7 +1070,6 @@ dataset_readAll(void)
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
int i, j, k;
herr_t ret; /* Generic return value */
@@ -1089,14 +1087,14 @@ dataset_readAll(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
- num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t));
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1273,18 +1271,18 @@ dataset_readAll(void)
free(data_array1);
if (data_origin1)
free(data_origin1);
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
block[0] = 1;
- block[1] = dim1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
- stride[1] = dim1;
+ stride[1] = (hsize_t)dim1;
count[0] = 1;
count[1] = 1;
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
dataset_fill(start, block, data_origin1);
@@ -1334,12 +1332,12 @@ dataset_readAll(void)
if (data_array1)
free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
point_set(start, count, stride, block, num_points, coords, IN_ORDER);
file_dataspace = H5Dget_space(dataset6);
@@ -1380,7 +1378,7 @@ dataset_readAll(void)
if (data_array1)
free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
@@ -1390,12 +1388,12 @@ dataset_readAll(void)
ret = H5Sselect_all(file_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
- num_points = dim0 * dim1;
+ num_points = (size_t)(dim0 * dim1);
k = 0;
for (i = 0; i < dim0; i++) {
for (j = 0; j < dim1; j++) {
- coords[k++] = i;
- coords[k++] = j;
+ coords[k++] = (hsize_t)i;
+ coords[k++] = (hsize_t)j;
}
}
mem_dataspace = H5Dget_space(dataset7);
@@ -1417,7 +1415,7 @@ dataset_readAll(void)
ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset7 succeeded");
- start[0] = dim0 / mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
data_origin1);
@@ -1503,11 +1501,11 @@ extend_writeInd(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1591,8 +1589,8 @@ extend_writeInd(void)
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -1648,8 +1646,8 @@ extend_writeInd(void)
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -1807,7 +1805,7 @@ extend_writeInd2(void)
* Write to the second half of the dataset
* -------------------------*/
for (i = 0; i < (int)orig_size; i++)
- written[i] = orig_size + i;
+ written[i] = (int)orig_size + i;
MESG("data array re-initialized");
if (VERBOSE_MED) {
MESG("writing at offset 10: ");
@@ -1881,11 +1879,11 @@ extend_readInd(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2063,11 +2061,11 @@ extend_writeAll(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -2151,8 +2149,8 @@ extend_writeAll(void)
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -2229,8 +2227,8 @@ extend_writeAll(void)
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -2301,11 +2299,11 @@ extend_readAll(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2471,7 +2469,7 @@ compress_readAll(void)
hid_t dataspace; /* Dataspace ID */
hid_t dataset; /* Dataset ID */
int rank = 1; /* Dataspace rank */
- hsize_t dim = dim0; /* Dataspace dimensions */
+ hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
@@ -2684,8 +2682,8 @@ none_selection_chunk(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* -------------------
* START AN HDF5 FILE
@@ -2715,8 +2713,8 @@ none_selection_chunk(void)
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -2989,8 +2987,8 @@ test_actual_io_mode(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3001,7 +2999,7 @@ test_actual_io_mode(int selection_mode)
/* If we are not testing contiguous datasets */
if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0] / mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0), "chunk creation property list succeeded");
@@ -3067,14 +3065,14 @@ test_actual_io_mode(int selection_mode)
}
else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
count[0] = 2;
count[1] = 1;
- stride[0] = mpi_rank * block[0];
+ stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
start[0] = 0;
- start[1] = mpi_rank * block[1];
+ start[1] = (hsize_t)mpi_rank * block[1];
}
test_name = "Multi Chunk - Mixed";
@@ -3105,18 +3103,18 @@ test_actual_io_mode(int selection_mode)
if (mpi_rank == 0) {
/* Select the first chunk in the first column */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / mpi_size;
+ block[0] = block[0] / (hsize_t)mpi_size;
}
else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
count[0] = 2;
count[1] = 1;
- stride[0] = mpi_rank * block[0];
+ stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
start[0] = 0;
- start[1] = mpi_rank * block[1];
+ start[1] = (hsize_t)mpi_rank * block[1];
}
/* If the testname was not already set by the RESET case */
@@ -3189,7 +3187,7 @@ test_actual_io_mode(int selection_mode)
length = dim0 * dim1;
/* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
for (i = 0; i < length; i++)
buffer[i] = i;
@@ -3437,7 +3435,6 @@ test_no_collective_cause_mode(int selection_mode)
uint32_t no_collective_cause_global_write = 0;
uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- hsize_t coord[NELM][RANK];
const char *filename;
const char *test_name;
@@ -3525,8 +3522,8 @@ test_no_collective_cause_mode(int selection_mode)
dims[1] = COL_FACTOR * 6;
}
else {
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
}
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3547,7 +3544,7 @@ test_no_collective_cause_mode(int selection_mode)
/* If we are not testing contiguous datasets */
if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0] / mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0), "chunk creation property list succeeded");
@@ -3629,7 +3626,7 @@ test_no_collective_cause_mode(int selection_mode)
length = dims[0] * dims[1];
/* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
for (i = 0; i < length; i++)
buffer[i] = i;
@@ -3821,8 +3818,8 @@ test_no_collective_cause_mode_filter(int selection_mode)
}
/* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3839,7 +3836,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* If we are not testing contiguous datasets */
if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0] / mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0), "chunk creation property list succeeded");
@@ -4055,10 +4052,10 @@ dataset_atomicity(void)
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
- write_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
/* allocate memory for data buffer */
- read_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
/* setup file access template */
@@ -4074,8 +4071,8 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Pclose succeeded");
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -4214,10 +4211,10 @@ dataset_atomicity(void)
VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
/* allocate memory for data buffer */
- write_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
/* allocate memory for data buffer */
- read_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
for (i = 0; i < buf_size; i++) {
@@ -4233,12 +4230,12 @@ dataset_atomicity(void)
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
- block[0] = dim0 / mpi_size - 1;
- block[1] = dim1 / mpi_size - 1;
+ block[0] = (hsize_t)(dim0 / mpi_size - 1);
+ block[1] = (hsize_t)(dim1 / mpi_size - 1);
stride[0] = block[0] + 1;
stride[1] = block[1] + 1;
- count[0] = mpi_size;
- count[1] = mpi_size;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
start[0] = 0;
start[1] = 0;
@@ -4296,19 +4293,19 @@ dataset_atomicity(void)
compare = 5;
for (i = 0; i < dim0; i++) {
- if (i >= mpi_rank * (block[0] + 1)) {
+ if (i >= mpi_rank * ((int)block[0] + 1)) {
break;
}
- if ((i + 1) % (block[0] + 1) == 0) {
+ if ((i + 1) % ((int)block[0] + 1) == 0) {
k += dim1;
continue;
}
for (j = 0; j < dim1; j++) {
- if (j >= mpi_rank * (block[1] + 1)) {
- k += dim1 - mpi_rank * (block[1] + 1);
+ if (j >= mpi_rank * ((int)block[1] + 1)) {
+ k += dim1 - mpi_rank * ((int)block[1] + 1);
break;
}
- if ((j + 1) % (block[1] + 1) == 0) {
+ if ((j + 1) % ((int)block[1] + 1) == 0) {
k++;
continue;
}
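A recurring fix throughout t_dset.c is the allocation-size arithmetic: dim0 and dim1 are ints, so each is cast to size_t before the product reaches HDmalloc. A sketch of the pattern; the DATATYPE typedef here is an assumption standing in for the tests' element type:

    #include <stdlib.h>

    typedef int DATATYPE; /* assumed: matches the tests' element type */

    /* Cast each int extent to size_t before multiplying, so the size
     * computation cannot wrap in int arithmetic before reaching malloc. */
    static DATATYPE *
    alloc_buffer(int dim0, int dim1)
    {
        return (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    }

    int
    main(void)
    {
        DATATYPE *buf = alloc_buffer(64, 128);
        int       ok  = (buf != NULL);

        free(buf);
        return ok ? 0 : 1;
    }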
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 4365aef..7807052 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -190,7 +190,7 @@ test_page_buffer_access(void)
fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
- ret = H5Pset_page_buffer_size(fapl_self, sizeof(int)*1000, 0, 0);
+ ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
@@ -210,49 +210,49 @@ test_page_buffer_access(void)
VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
/* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements);
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
VRFY((raw_addr != HADDR_UNDEF), "");
/* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int)*(size_t)num_elements);
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
VRFY((meta_addr != HADDR_UNDEF), "");
page_count = 0;
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the first 50 elements */
- for(i=0 ; i<50 ; i++)
+ for (i = 0; i < 50; i++)
data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
page_count += 2;
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the second 50 elements */
- for(i=0 ; i<50 ; i++)
- data[i] = i+50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update 100 - 200 */
- for(i=0 ; i<100 ; i++)
- data[i] = i+100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -260,37 +260,41 @@ test_page_buffer_access(void)
VRFY((ret == 0), "");
/* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
/* close the file */
ret = H5Fclose(file_id);
VRFY((ret >= 0), "H5Fclose succeeded");
ret = H5Pclose(fapl_self);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
/* Pop API context */
- if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+ if (api_ctx_pushed) {
+ ret = H5CX_pop();
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
}
#endif
@@ -484,19 +488,19 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = ROW_FACTOR * mpi_size;
- dims[1] = COL_FACTOR * mpi_size;
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* Each process takes a slabs of rows. */
- block[0] = dims[0] / mpi_size;
+ block[0] = dims[0] / (hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank * block[0];
+ start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
num_elements = block[0] * block[1];
@@ -645,17 +649,17 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = ROW_FACTOR * mpi_size;
- dims[1] = COL_FACTOR * mpi_size;
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
/* Each process takes a slabs of rows. */
- block[0] = dims[0] / mpi_size;
+ block[0] = dims[0] / (hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank * block[0];
+ start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
num_elements = block[0] * block[1];
@@ -677,8 +681,8 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == ROW_FACTOR * mpi_size, "Wrong dataset dimensions");
- VRFY(dims[1] == COL_FACTOR * mpi_size, "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -691,7 +695,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
ret = H5Sclose(sid);
VRFY((ret == 0), "");
- for (i = 0; i < num_elements; i++)
+ for (i = 0; i < (int)num_elements; i++)
VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
}
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 9d07f23..5c0d10f 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -71,10 +71,10 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
hs_size[0] = size[0] = HS_DIM1;
hs_size[1] = HS_DIM2;
- size[1] = hs_size[1] * mpi_size;
+ size[1] = hs_size[1] * (hsize_t)mpi_size;
hs_offset[0] = 0;
- hs_offset[1] = hs_size[1] * mpi_rank;
+ hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank;
/* Create the data space */
sid = H5Screate_simple(2, size, NULL);
@@ -208,7 +208,9 @@ test_filter_read(void)
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char * filename;
- hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+#ifdef H5_HAVE_FILTER_FLETCHER32
+ hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+#endif
#ifdef H5_HAVE_FILTER_DEFLATE
hsize_t deflate_size; /* Size of dataset with deflate filter */
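Moving the declaration under the same H5_HAVE_FILTER_FLETCHER32 guard as the code that uses it silences an unused-variable warning when the filter is compiled out. A sketch of the pattern, reusing the filter_read_internal() signature shown earlier in this file's diff (the dcpl and hrc names are assumptions):

    #ifdef H5_HAVE_FILTER_FLETCHER32
        hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
    #endif

        /* ... */

    #ifdef H5_HAVE_FILTER_FLETCHER32
        hrc = H5Pset_fletcher32(dcpl); /* public API; enables the filter */
        VRFY(hrc >= 0, "H5Pset_fletcher32");
        filter_read_internal(filename, dcpl, &fletcher32_size);
    #endif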
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 37479b3..5153bce 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -255,8 +255,9 @@ test_write_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
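This hunk, and the many like it below, replaces a hard-coded %llu with HDF5's PRIuHSIZE macro: hsize_t is a 64-bit unsigned type, but its underlying C type varies across platforms, so the correct length modifier has to be spliced in at compile time, <inttypes.h>-style. A minimal standalone sketch of the idiom (plain printf and a hypothetical helper name, for illustration only):

    #include <stdio.h>
    #include "hdf5.h" /* hsize_t and the PRIuHSIZE format macro */

    static void
    print_block(int rank, const hsize_t block[2])
    {
        /* PRIuHSIZE expands to the right specifier body ("llu", "lu", ...),
         * so adjacent string literals assemble a portable format string. */
        printf("Process %d block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
               rank, block[0], block[1]);
    }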
@@ -418,8 +419,9 @@ test_write_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -578,8 +580,9 @@ test_write_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -746,8 +749,9 @@ test_write_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1202,8 +1206,9 @@ test_write_filtered_dataset_interleaved_write(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1379,8 +1384,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1547,8 +1554,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1714,8 +1723,10 @@ test_write_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
start[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -1900,8 +1911,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2080,8 +2092,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2279,8 +2292,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2471,8 +2485,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2687,8 +2702,9 @@ test_read_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2890,8 +2906,9 @@ test_read_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3094,8 +3111,9 @@ test_read_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3324,8 +3342,9 @@ test_read_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3906,8 +3925,9 @@ test_read_filtered_dataset_interleaved_read(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4135,8 +4155,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4351,8 +4372,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4574,8 +4596,9 @@ test_read_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -4807,8 +4830,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5035,8 +5059,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5265,8 +5290,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5502,8 +5528,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5803,8 +5830,10 @@ test_write_parallel_read_serial(void)
offset[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ "
- "%llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0],
offset[1], offset[2], block[0], block[1], block[2]);
HDfflush(stdout);
@@ -5973,8 +6002,9 @@ test_shrinking_growing_chunks(void)
start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], "
- "block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 5bacc12..a75ffe3 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -128,12 +128,6 @@ zero_dim_dset(void)
/*
* Example of using PHDF5 to create ndatasets datasets. Each process writes
* a slab of an array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_dset_write(void)
@@ -161,7 +155,7 @@ multiple_dset_write(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
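The rewritten allocation casts each int operand individually: in size * size * sizeof(double), the left-associative size * size is evaluated as int * int and could overflow before the result is widened for the sizeof multiply. For the extents these tests use, the casts mainly silence -Wconversion, but the safe shape generalizes; a sketch (plain malloc stands in for HDmalloc):

    #include <stdlib.h>

    static double *
    alloc_square(int size)
    {
        /* Overflow-prone: malloc((size_t)(size * size * sizeof(double)));
         * (size * size) happens in int first.  Widening each operand
         * keeps the whole product in size_t: */
        return malloc((size_t)size * (size_t)size * sizeof(double));
    }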
@@ -216,12 +210,6 @@ multiple_dset_write(void)
}
/* Example of using PHDF5 to create, write, and read a compact dataset.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
compact_dataset(void)
@@ -241,15 +229,15 @@ compact_dataset(void)
size = get_size();
for (i = 0; i < DIM; i++)
- file_dims[i] = size;
+ file_dims[i] = (hsize_t)size;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
- inme = HDmalloc((size_t)(size * size * sizeof(double)));
+ inme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for inme");
filename = GetTestParameters();
@@ -351,14 +339,6 @@ compact_dataset(void)
/*
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/24/04
*/
void
null_dataset(void)
@@ -463,14 +443,6 @@ null_dataset(void)
* Actual data is _not_ written to these datasets. Dataspaces are exact
* sizes (2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
void
big_dataset(void)
@@ -592,16 +564,6 @@ big_dataset(void)
/* Example of using PHDF5 to read a partially written dataset. The dataset does
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * Also added code to free dynamically allocated buffers.
- *
- * JRM - 8/11/04
*/
void
dataset_fillvalue(void)
@@ -728,7 +690,7 @@ dataset_fillvalue(void)
* Each process writes 1 row of data. Thus the last row is not written.
*/
/* Create hyperslabs in memory and file dataspaces */
- req_start[0] = mpi_rank;
+ req_start[0] = (hsize_t)mpi_rank;
ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
@@ -854,12 +816,6 @@ collective_group_write_independent_group_read(void)
/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
collective_group_write(void)
@@ -889,7 +845,7 @@ collective_group_write(void)
chunk_size[0] = (hsize_t)(size / 2);
chunk_size[1] = (hsize_t)(size / 2);
- outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -991,16 +947,6 @@ independent_group_read(void)
}
/* Open and read datasets and compare data
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memeory
- * when we are done with it.
- *
- * JRM - 8/16/04
*/
static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
@@ -1013,10 +959,10 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
size = get_size();
- indata = (DATATYPE *)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata = (DATATYPE *)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
/* open every group under root group. */
@@ -1033,9 +979,8 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
/* this is the original value */
for (i = 0; i < size; i++)
- for (j = 0; j < size; j++) {
+ for (j = 0; j < size; j++)
outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- }
/* compare the original value(outdata) to the value in file(indata).*/
ret = check_value(indata, outdata, size);
@@ -1074,11 +1019,6 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
* + means the group has attribute(s).
* ' means the datasets in the groups have attribute(s).
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
multiple_group_write(void)
@@ -1162,12 +1102,6 @@ multiple_group_write(void)
/*
* In a group, creates NDATASET datasets. Each process writes a hyperslab
* of a data array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1183,7 +1117,7 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
size = get_size();
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
for (n = 0; n < NDATASET; n++) {
@@ -1241,12 +1175,6 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
/*
* This function verifies the data from the multiple group testing. It opens
* every dataset in every group and checks their correctness.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_group_read(void)
@@ -1321,12 +1249,6 @@ multiple_group_read(void)
/*
* This function opens all the datasets in a certain group and checks the data
* using the dataset_vrfy function.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1341,10 +1263,10 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
size = get_size();
- indata = (DATATYPE *)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata = (DATATYPE *)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
for (n = 0; n < NDATASET; n++) {
@@ -1477,12 +1399,6 @@ read_attribute(hid_t obj_id, int this_type, int num)
/* This function compares the original data with the read-in data for its
* hyperslab part only by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
@@ -1497,8 +1413,8 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
get_slab(chunk_origin, chunk_dims, count, NULL, size);
- indata += chunk_origin[0] * size;
- outdata += chunk_origin[0] * size;
+ indata += chunk_origin[0] * (hsize_t)size;
+ outdata += chunk_origin[0] * (hsize_t)size;
for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++)
for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) {
if (*indata != *outdata)
@@ -1515,12 +1431,6 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
}
/* Decide the portion of data chunk in dataset by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
static void
@@ -1532,15 +1442,15 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
if (chunk_origin != NULL) {
- chunk_origin[0] = mpi_rank * (size / mpi_size);
+ chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size);
chunk_origin[1] = 0;
}
if (chunk_dims != NULL) {
- chunk_dims[0] = size / mpi_size;
- chunk_dims[1] = size;
+ chunk_dims[0] = (hsize_t)(size / mpi_size);
+ chunk_dims[1] = (hsize_t)size;
}
if (file_dims != NULL)
- file_dims[0] = file_dims[1] = size;
+ file_dims[0] = file_dims[1] = (hsize_t)size;
if (count != NULL)
count[0] = count[1] = 1;
}
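get_slab() centralizes the per-rank decomposition: each process gets size / mpi_size rows starting at mpi_rank * (size / mpi_size). A hedged usage sketch, assuming the parameter truncated from the hunk header above is int size, as the body implies:

    hsize_t chunk_origin[2], chunk_dims[2], count[2], file_dims[2];

    get_slab(chunk_origin, chunk_dims, count, file_dims, get_size());

    /* start = chunk_origin, stride = chunk_dims, count = {1, 1},
     * block = chunk_dims: one contiguous slab of rows per rank. */
    ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
                              chunk_dims, count, chunk_dims);
    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");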
@@ -1562,8 +1472,6 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t
* This function reproduces this situation. At present the test hangs
* on failure.
* JRM - 9/13/04
- *
- * Changes: None.
*/
#define N 4
@@ -1807,10 +1715,6 @@ io_mode_confusion(void)
* cache clients will have to construct on disk images on demand.
*
* JRM -- 10/13/10
- *
- * Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
*/
#define NUM_DATA_SETS 4
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 67ec6cb..a883f55 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -513,7 +513,7 @@ test_mpio_1wMr(char *filename, int special_request)
* ==================================================*/
irank = 0;
for (i = 0; i < DIMSIZE; i++)
- writedata[i] = irank * DIMSIZE + i;
+ writedata[i] = (uint8_t)(irank * DIMSIZE + i);
mpi_off = irank * DIMSIZE;
/* Only one process writes */
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index 8c88d47..384fb1b 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -32,7 +32,7 @@ main(int argc, char **argv)
hid_t fapl, sid, mem_dataspace;
herr_t ret;
char filename[1024];
- int mpi_size, mpi_rank, ndims, i, j;
+ int mpi_size, mpi_rank, ndims;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
hsize_t dims[RANK];
@@ -40,6 +40,7 @@ main(int argc, char **argv)
hsize_t count[RANK];
hsize_t stride[RANK];
hsize_t block[RANK];
+ hsize_t i, j;
DATATYPE *data_array = NULL, *dataptr; /* data buffer */
MPI_Init(&argc, &argv);
@@ -70,21 +71,21 @@ main(int argc, char **argv)
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == ROW_FACTOR * mpi_size, "Wrong dataset dimensions");
- VRFY(dims[1] == COL_FACTOR * mpi_size, "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
/* allocate memory for data buffer */
data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slab of rows. */
- block[0] = dims[0] / mpi_size;
+ block[0] = dims[0] / (hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank * block[0];
+ start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
@@ -104,8 +105,8 @@ main(int argc, char **argv)
for (j = 0; j < block[1]; j++) {
if (*dataptr != mpi_rank + 1) {
HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
- (unsigned long)(j + start[1]), mpi_rank + 1, *(dataptr));
+ (unsigned long)i, (unsigned long)j, (unsigned long)((hsize_t)i + start[0]),
+ (unsigned long)((hsize_t)j + start[1]), mpi_rank + 1, *(dataptr));
nerrors++;
}
dataptr++;
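Declaring i and j as hsize_t instead of int makes the loop-bound comparisons unsigned-to-unsigned, since block[] is hsize_t; the printf arguments are then narrowed with explicit casts as shown above. The shape of the verification loop, sketched with the test's names:

    hsize_t i, j;

    for (i = 0; i < block[0]; i++)       /* rows owned by this rank */
        for (j = 0; j < block[1]; j++) { /* full row width          */
            if (*dataptr != mpi_rank + 1)
                nerrors++;               /* record the mismatch     */
            dataptr++;
        }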
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index e88c656..bcde00e 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -53,7 +53,7 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
void *rbuf;
MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
- buf_size = recv_size;
+ buf_size = (size_t)recv_size;
rbuf = (uint8_t *)HDmalloc(buf_size);
MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
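MPI counts are plain int while allocation sizes are size_t, so the widening is now explicit rather than implicit. The receiver's side of the exchange, sketched with the standard MPI_Recv signature (tags mirror the test; plain malloc stands in for HDmalloc):

    #include <mpi.h>
    #include <stdlib.h>

    static void
    recv_encoded_plist(void)
    {
        MPI_Status status;
        int        recv_size = 0;
        size_t     buf_size;
        void *     rbuf;

        /* First message: the encoded property-list size, as an int. */
        MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);

        buf_size = (size_t)recv_size; /* explicit int -> size_t widening */
        rbuf     = malloc(buf_size);

        /* Second message: the encoded property list itself. */
        MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
        free(rbuf);
    }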
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index a426c09..b8028a4 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -65,8 +65,8 @@ main(int argc, char **argv)
grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "H5Gcreate succeeded");
- dims[0] = ROW_FACTOR * mpi_size;
- dims[1] = COL_FACTOR * mpi_size;
+ dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
+ dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -78,13 +78,13 @@ main(int argc, char **argv)
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slab of rows. */
- block[0] = dims[0] / mpi_size;
+ block[0] = dims[0] / (hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank * block[0];
+ start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
/* put some trivial data in the data_array */
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 0f35293..16226ee 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -1701,21 +1701,6 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
*
* Programmer: JRM -- 9/18/09
*
- * Modifications:
- *
- * JRM -- 9/16/10
- * Added express_test parameter. Use it to control whether
- * we set up the chunks so that no chunk is shared between
- * processes, and also whether we set an alignment when we
- * create the test file.
- *
- * JRM -- 8/11/11
- * Refactored function heavily & broke it into six functions.
- * Added the skips_ptr, max_skips, total_tests_ptr,
- * tests_run_ptr, and tests_skiped_ptr parameters to support
- * skipping portions of the test according to the express
- * test value.
- *
*-------------------------------------------------------------------------
*/
@@ -1905,20 +1890,6 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
*
* Programmer: JRM -- 9/18/09
*
- * Modifications:
- *
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
- *
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
- * Break this one big test into 4 smaller tests according
- * to {independent,collective}x{contigous,chunked} datasets.
- * AKC -- 2010/01/14
- *
*-------------------------------------------------------------------------
*/
@@ -2038,8 +2009,8 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", tests_skipped,
- total_tests);
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
}
return;
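The same portability fix as the PRIuHSIZE changes, but for signed 64-bit counters: %lld assumes long long, while int64_t may be a plain long on LP64 platforms. A minimal sketch with the standard <inttypes.h> macro, assuming the counters are int64_t as the specifier implies:

    #include <inttypes.h> /* PRId64 */
    #include <stdio.h>

    static void
    report_skips(int64_t tests_skipped, int64_t total_tests)
    {
        /* PRId64 always matches int64_t, whatever its underlying type. */
        fprintf(stdout, "     %" PRId64 " of %" PRId64 " subtests skipped.\n",
                tests_skipped, total_tests);
    }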
@@ -3671,14 +3642,6 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
*
* Programmer: JRM -- 10/10/09
*
- * Modifications:
- *
- * JRM -- 9/16/10
- * Added the express_test parameter. Use it to control
- * whether we set an alignment, and whether we allocate
- * chunks such that no two processes will normally touch
- * the same chunk.
- *
*-------------------------------------------------------------------------
*/
@@ -3854,20 +3817,6 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
*
* Programmer: JRM -- 9/18/09
*
- * Modifications:
- *
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
- *
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
- * Break this one big test into 4 smaller tests according
- * to {independent,collective}x{contigous,chunked} datasets.
- * AKC -- 2010/01/17
- *
*-------------------------------------------------------------------------
*/
@@ -3983,16 +3932,16 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
- total_tests);
+ HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n",
+ tests_run, tests_skipped, total_tests);
}
#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
}
}
if ((MAINPROCESS) && (tests_skipped > 0)) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", tests_skipped,
- total_tests);
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
}
return;
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 293a945..32d0265 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -238,17 +238,17 @@ coll_write_test(int chunk_factor)
* Buffers' initialization.
*/
- mdim1[0] = MSPACE1_DIM * mpi_size;
+ mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size);
mdim[0] = MSPACE_DIM1;
- mdim[1] = MSPACE_DIM2 * mpi_size;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
fsdim[0] = FSPACE_DIM1;
- fsdim[1] = FSPACE_DIM2 * mpi_size;
+ fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size);
- vector = (int *)HDmalloc(sizeof(int) * mdim1[0] * mpi_size);
- matrix_out = (int *)HDmalloc(sizeof(int) * mdim[0] * mdim[1] * mpi_size);
- matrix_out1 = (int *)HDmalloc(sizeof(int) * mdim[0] * mdim[1] * mpi_size);
+ vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
- HDmemset(vector, 0, sizeof(int) * mdim1[0] * mpi_size);
+ HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1;
for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++)
vector[i] = i;
@@ -273,8 +273,8 @@ coll_write_test(int chunk_factor)
VRFY((ret >= 0), "Fill value creation property list succeeded");
if (chunk_factor != 0) {
- chunk_dims[0] = fsdim[0] / chunk_factor;
- chunk_dims[1] = fsdim[1] / chunk_factor;
+ chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
+ chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
VRFY((ret >= 0), "chunk creation property list succeeded");
}
@@ -312,7 +312,7 @@ coll_write_test(int chunk_factor)
*/
start[0] = FHSTART0;
- start[1] = FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1;
+ start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
stride[0] = FHSTRIDE0;
stride[1] = FHSTRIDE1;
count[0] = FHCOUNT0;
@@ -333,7 +333,7 @@ coll_write_test(int chunk_factor)
*/
start[0] = SHSTART0;
- start[1] = SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank;
+ start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank);
stride[0] = SHSTRIDE0;
stride[1] = SHSTRIDE1;
count[0] = SHCOUNT0;
@@ -469,7 +469,7 @@ coll_write_test(int chunk_factor)
*
*/
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1 + mpi_rank * RFFHCOUNT1;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
@@ -495,7 +495,7 @@ coll_write_test(int chunk_factor)
*/
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1 + RFSHCOUNT1 * mpi_rank;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
@@ -533,7 +533,7 @@ coll_write_test(int chunk_factor)
*/
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1 + mpi_rank * RMFHCOUNT1;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
@@ -556,7 +556,7 @@ coll_write_test(int chunk_factor)
*
*/
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1 + mpi_rank * RMSHCOUNT1;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
@@ -571,8 +571,8 @@ coll_write_test(int chunk_factor)
* Initialize data buffer.
*/
- HDmemset(matrix_out, 0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
- HDmemset(matrix_out1, 0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
/*
* Read data back to the buffer matrix_out.
*/
@@ -690,9 +690,9 @@ coll_read_test(int chunk_factor)
/* Initialize the buffer */
mdim[0] = MSPACE_DIM1;
- mdim[1] = MSPACE_DIM2 * mpi_size;
- matrix_out = (int *)HDmalloc(sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
- matrix_out1 = (int *)HDmalloc(sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
/*** For testing collective hyperslab selection read ***/
@@ -727,7 +727,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1 + mpi_rank * RFFHCOUNT1;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
@@ -747,7 +747,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1 + RFSHCOUNT1 * mpi_rank;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
@@ -776,7 +776,7 @@ coll_read_test(int chunk_factor)
*/
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1 + mpi_rank * RMFHCOUNT1;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
@@ -798,7 +798,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1 + mpi_rank * RMSHCOUNT1;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
@@ -812,8 +812,8 @@ coll_read_test(int chunk_factor)
* Initialize data buffer.
*/
- HDmemset(matrix_out, 0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
- HDmemset(matrix_out1, 0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
/*
* Read data back to the buffer matrix_out.
@@ -973,9 +973,9 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
* pre-C99 compilers again.
*/
- base_count = dims[sel_offset] / (checker_edge_size * 2);
+ base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
- if ((dims[sel_rank] % (checker_edge_size * 2)) > 0) {
+ if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) {
base_count++;
}
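The cast keeps the divisor in hsize_t so the division and modulo are fully unsigned. When both subscripts name the same extent, the statement pair is a spelled-out ceiling division, equivalent to this hedged one-liner (the test keeps the explicit remainder check):

    /* base_count = ceil(dims[i] / (2 * checker_edge_size)) */
    base_count = (dims[sel_offset] + (hsize_t)(checker_edge_size * 2) - 1) /
                 (hsize_t)(checker_edge_size * 2);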