Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeLists.txt        |    1
-rw-r--r--  testpar/Makefile.am           |    2
-rw-r--r--  testpar/t_cache.c             |   28
-rw-r--r--  testpar/t_cache_image.c       |    2
-rw-r--r--  testpar/t_coll_md_read.c      |  199
-rw-r--r--  testpar/t_dset.c              | 1050
-rw-r--r--  testpar/t_filters_parallel.c  |  359
-rw-r--r--  testpar/t_pread.c             |    7
-rw-r--r--  testpar/testphdf5.c           |    3
-rw-r--r--  testpar/testphdf5.h           |    1
10 files changed, 968 insertions, 684 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index c08a69e..0b3cbe3 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -17,6 +17,7 @@ set (testphdf5_SOURCES
${HDF5_TEST_PAR_SOURCE_DIR}/t_chunk_alloc.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_filter_read.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_prop.c
+ ${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_md_read.c
)
#-- Adding test for testhdf5
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 5c7cb26..7262ca6 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -29,7 +29,7 @@ check_PROGRAMS = $(TEST_PROG_PARA)
testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
- t_prop.c
+ t_prop.c t_coll_md_read.c
# The tests all depend on the hdf5 library and the test library
LDADD = $(LIBH5TEST) $(LIBHDF5)
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index caa578e..da83884 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -1217,20 +1217,20 @@ setup_derived_types(void)
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
+ if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
world_mpi_rank, FUNC);
}
@@ -1245,14 +1245,14 @@ setup_derived_types(void)
if ( success ) {
- result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
if ( result != MPI_SUCCESS ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -4091,7 +4091,7 @@ setup_cache_for_test(hid_t * fid_ptr,
world_mpi_rank, FUNC);
}
} else {
- file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE);
+ file_ptr = (H5F_t *)H5VL_object_verify(fid, H5I_FILE);
}
if ( file_ptr == NULL ) {
@@ -7134,7 +7134,7 @@ trace_file_check(int metadata_write_strategy)
if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
- actual_line_len = strlen(buffer);
+ actual_line_len = HDstrlen(buffer);
} else {
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 5b512d6..e158d69 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -1187,7 +1187,7 @@ open_hdf5_file(const hbool_t create_file,
} else {
- file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+ file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE);
if ( file_ptr == NULL ) {
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
new file mode 100644
index 0000000..f945d2b
--- /dev/null
+++ b/testpar/t_coll_md_read.c
@@ -0,0 +1,199 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A test suite to test HDF5's collective metadata read capabilities, as enabled
+ * by making a call to H5Pset_all_coll_metadata_ops().
+ */
+
+#include "testphdf5.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Define the non-participating process as the "last"
+ * rank to avoid any weirdness potentially caused by
+ * an if (mpi_rank == 0) check.
+ */
+#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
+#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
+#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
+#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+
+/*
+ * A test for issue HDFFV-10501. A parallel hang was reported which occurred
+ * in linked-chunk I/O when collective metadata reads are enabled and some ranks
+ * do not have any selection in a dataset's dataspace, while others do. The ranks
+ * which have no selection during the read/write operation called H5D__chunk_addrmap()
+ * to retrieve the lowest chunk address, since we require that the read/write be done
+ * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
+ * this caused the non-participating ranks to issue a collective MPI_Bcast() call
+ * which the other ranks did not issue, thus causing a hang.
+ *
+ * However, since these ranks are not actually reading/writing anything, this call
+ * can simply be removed and the address used for the read/write can be set to an
+ * arbitrary number (0 was chosen).
+ */
+void test_partial_no_selection_coll_md_read(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = { PARTIAL_NO_SELECTION_Y_DIM_SCALE, PARTIAL_NO_SELECTION_X_DIM_SCALE };
+ hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t dcpl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t fspace_id = -1;
+ hid_t mspace_id = -1;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_size;
+ dataset_dims[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE * mpi_size;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+ max_dataset_dims[1] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
+ start[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_rank;
+ start[1] = 0;
+ stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+ count[0] = 1;
+ count[1] = mpi_size;
+ block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ /*
+ * Make sure to call H5Sselect_none() on the non-participating process.
+ */
+ if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
+ VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
+ }
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ /*
+ * Check data integrity just to be sure.
+ */
+ if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((!memcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
+ }
+
+ if (dataset_dims) {
+ free(dataset_dims);
+ dataset_dims = NULL;
+ }
+
+ if (data) {
+ free(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ free(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
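
The hang described in the comment at the top of t_coll_md_read.c comes down to mismatched participation in a collective MPI call: some ranks enter MPI_Bcast() while others never do, so the ranks that did call it block forever. A minimal standalone sketch of that pattern (illustrative only, not part of this patch or of HDF5; the DEMONSTRATE_HANG macro is invented for the example) is:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int mpi_rank, mpi_size;
    int value = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    if (mpi_rank == 0)
        value = 42;

#ifdef DEMONSTRATE_HANG
    /*
     * Mismatched collective: the last rank skips the broadcast while the
     * other ranks enter it, so those ranks block indefinitely.  In the
     * HDFFV-10501 report the mismatch ran the other way (only the
     * no-selection ranks issued the broadcast), but the effect is the same.
     */
    if (mpi_rank != mpi_size - 1)
        MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
#else
    /* Correct: every rank on the communicator participates. */
    MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
#endif

    printf("rank %d of %d: value = %d\n", mpi_rank, mpi_size, value);

    MPI_Finalize();
    return 0;
}

With every rank calling MPI_Bcast() the program completes; building with -DDEMONSTRATE_HANG makes the last rank skip the call and the remaining ranks never return from it, which is the behavior the test above guards against.
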
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index b315772..281d027 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -36,83 +36,91 @@
* Setup the dimensions of the hyperslab.
* Two modes--by rows or by columns.
* Assume dimension rank is 2.
- * BYROW divide into slabs of rows
- * BYCOL divide into blocks of columns
- * ZROW same as BYROW except process 0 gets 0 rows
- * ZCOL same as BYCOL except process 0 gets 0 columns
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
*/
static void
slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
- hsize_t stride[], hsize_t block[], int mode)
+ hsize_t stride[], hsize_t block[], int mode)
{
- switch (mode){
+ switch (mode) {
case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = dim0/mpi_size;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = mpi_rank*block[0];
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set BYROW\n");
- break;
+ /* Each process takes a slabs of rows. */
+ block[0] = dim0 / mpi_size;
+ block[1] = dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = dim0;
- block[1] = dim1/mpi_size;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank*block[1];
-if(VERBOSE_MED) printf("slab_set BYCOL\n");
- break;
+ /* Each process takes a block of columns. */
+ block[0] = dim0;
+ block[1] = dim1 / mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (mpi_rank ? dim0/mpi_size : 0);
- block[1] = dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (mpi_rank? mpi_rank*block[0] : 0);
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set ZROW\n");
- break;
+ /* Similar to BYROW except process 0 gets 0 row */
+ block[0] = (mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = dim0;
- block[1] = (mpi_rank ? dim1/mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (mpi_rank? mpi_rank*block[1] : 0);
-if(VERBOSE_MED) printf("slab_set ZCOL\n");
- break;
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = dim0;
+ block[1] = (mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
default:
- /* Unknown mode. Set it to cover the whole dataset. */
- printf("unknown slab_set mode (%d)\n", mode);
- block[0] = dim0;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set wholeset\n");
- break;
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = dim0;
+ block[1] = dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
}
-if(VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ if (VERBOSE_MED) {
+ HDprintf(
+ "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long) start[0], (unsigned long) start[1],
+ (unsigned long) count[0], (unsigned long) count[1],
+ (unsigned long) stride[0], (unsigned long) stride[1],
+ (unsigned long) block[0], (unsigned long) block[1],
+ (unsigned long) (block[0] * block[1] * count[0] * count[1]));
}
}
@@ -123,7 +131,7 @@ void point_set(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -153,13 +161,13 @@ void point_set(hsize_t start[],
}
if(VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
for(i = 0; i < num_points ; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
@@ -177,10 +185,10 @@ dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
/* put some trivial data in the data_array */
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (j=0; j < block[1]; j++){
+ *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
}
}
@@ -195,19 +203,19 @@ dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
hsize_t i, j;
/* print the column heading */
- printf("%-8s", "Cols:");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -223,35 +231,35 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
/* print it if VERBOSE_MED */
if(VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
- *(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
+ for (j=0; j < block[1]; j++){
+ if(*dataset != *original){
+ if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j,
+ (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
+ *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
+ }
}
if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(vrfyerrs)
- printf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
@@ -272,20 +280,20 @@ void
dataset_writeInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -293,7 +301,7 @@ dataset_writeInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Independent write test on file %s\n", filename);
+ HDprintf("Independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -332,12 +340,12 @@ dataset_writeInd(void)
/* create a dataset collectively */
dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
@@ -366,28 +374,28 @@ dataset_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to write with zero rows for process 0 */
if(VERBOSE_MED)
- printf("writeInd by some with zero row\n");
+ HDprintf("writeInd by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeInd by some with zero row");
if((mpi_rank/2)*2 != mpi_rank){
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
}
#ifdef BARRIER_CHECKS
@@ -418,19 +426,19 @@ void
dataset_readInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -438,7 +446,7 @@ dataset_readInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Independent read test on file %s\n", filename);
+ HDprintf("Independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -489,7 +497,7 @@ dataset_readInd(void)
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -498,7 +506,7 @@ dataset_readInd(void)
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -540,28 +548,28 @@ void
dataset_writeAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
int i;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -569,7 +577,7 @@ dataset_writeAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective write test on file %s\n", filename);
+ HDprintf("Collective write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -669,8 +677,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -687,23 +695,23 @@ dataset_writeAll(void)
/* write data collectively */
MESG("writeAll by Row");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* setup dimensions again to writeAll with zero rows for process 0 */
if(VERBOSE_MED)
- printf("writeAll by some with zero row\n");
+ HDprintf("writeAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero row");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
/* release all temporary handles. */
@@ -720,8 +728,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a file dataspace independently */
@@ -738,8 +746,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -755,23 +763,23 @@ dataset_writeAll(void)
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to writeAll with zero columns for process 0 */
if(VERBOSE_MED)
- printf("writeAll by some with zero col\n");
+ HDprintf("writeAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero col");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
/* release all temporary handles. */
@@ -789,8 +797,8 @@ dataset_writeAll(void)
file_dataspace = H5Dget_space (dataset3);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
@@ -801,16 +809,16 @@ dataset_writeAll(void)
mem_dataspace = H5Screate_simple (RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
@@ -827,13 +835,13 @@ dataset_writeAll(void)
/* write data collectively */
MESG("writeAll with none");
ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with none");
ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* release all temporary handles. */
@@ -850,8 +858,8 @@ dataset_writeAll(void)
file_dataspace = H5Dget_space (dataset4);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(file_dataspace);
@@ -862,8 +870,8 @@ dataset_writeAll(void)
mem_dataspace = H5Screate(H5S_SCALAR);
VRFY((mem_dataspace >= 0), "");
if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(mem_dataspace);
@@ -874,8 +882,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
@@ -891,13 +899,13 @@ dataset_writeAll(void)
/* write data collectively */
MESG("writeAll with scalar dataspace");
ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with scalar dataspace");
ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* release all temporary handles. */
@@ -907,7 +915,7 @@ dataset_writeAll(void)
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
block[0] = 1;
@@ -922,15 +930,15 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* Dataset5: point selection in File - Hyperslab selection in Memory*/
/* create a file dataspace independently */
point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
file_dataspace = H5Dget_space (dataset5);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
@@ -1005,7 +1013,7 @@ dataset_writeAll(void)
start[1] = 0;
point_set (start, count, stride, block, num_points, coords, IN_ORDER);
file_dataspace = H5Dget_space (dataset7);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
@@ -1075,25 +1083,25 @@ void
dataset_readAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
int i,j,k;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1101,7 +1109,7 @@ dataset_readAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective read test on file %s\n", filename);
+ HDprintf("Collective read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -1174,8 +1182,8 @@ dataset_readAll(void)
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
@@ -1191,7 +1199,7 @@ dataset_readAll(void)
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
@@ -1200,18 +1208,18 @@ dataset_readAll(void)
/* setup dimensions again to readAll with zero columns for process 0 */
if(VERBOSE_MED)
- printf("readAll by some with zero col\n");
+ HDprintf("readAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero col");
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
/* verify the read data with original expected data */
@@ -1242,8 +1250,8 @@ dataset_readAll(void)
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
@@ -1259,7 +1267,7 @@ dataset_readAll(void)
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
@@ -1268,18 +1276,18 @@ dataset_readAll(void)
/* setup dimensions again to readAll with zero rows for process 0 */
if(VERBOSE_MED)
- printf("readAll by some with zero row\n");
+ HDprintf("readAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero row");
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
/* verify the read data with original expected data */
@@ -1293,9 +1301,9 @@ dataset_readAll(void)
if(data_array1) free(data_array1);
if(data_origin1) free(data_origin1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
block[0] = 1;
@@ -1310,8 +1318,8 @@ dataset_readAll(void)
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* Dataset5: point selection in memory - Hyperslab selection in file*/
@@ -1344,7 +1352,7 @@ dataset_readAll(void)
xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
+
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
if(ret) nerrors++;
@@ -1355,7 +1363,7 @@ dataset_readAll(void)
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
@@ -1400,7 +1408,7 @@ dataset_readAll(void)
H5Pclose(xfer_plist);
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
@@ -1488,25 +1496,25 @@ void
extend_writeInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1514,7 +1522,7 @@ extend_writeInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -1567,7 +1575,7 @@ extend_writeInd(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -1603,8 +1611,8 @@ extend_writeInd(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -1625,7 +1633,7 @@ extend_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1643,8 +1651,8 @@ extend_writeInd(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -1664,7 +1672,7 @@ extend_writeInd(void)
/* write data independently. Should fail. */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -1685,7 +1693,7 @@ extend_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1719,25 +1727,25 @@ extend_writeInd2(void)
{
const char *filename;
hid_t fid; /* HDF5 file ID */
- hid_t fapl; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size=10; /* Original dataset dim size */
- hsize_t new_size=20; /* Extended dataset dim size */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size=10; /* Original dataset dim size */
+ hsize_t new_size=20; /* Extended dataset dim size */
hsize_t one=1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
int written[10], /* Data to write */
retrieved[10]; /* Data read in */
int mpi_size, mpi_rank; /* MPI settings */
int i; /* Local index variable */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test #2 on file %s\n", filename);
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -1794,10 +1802,10 @@ extend_writeInd2(void)
written[i] = i;
MESG("data array initialized");
if(VERBOSE_MED) {
- MESG("writing at offset zero: ");
+ MESG("writing at offset zero: ");
for(i = 0; i < (int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
}
ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -1809,15 +1817,15 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Dread succeeded");
for (i=0; i<(int)orig_size; i++)
if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
i,written[i], i,retrieved[i]);
nerrors++;
}
if(VERBOSE_MED){
- MESG("read at offset zero: ");
+ MESG("read at offset zero: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
}
/* -------------------------
@@ -1837,10 +1845,10 @@ extend_writeInd2(void)
written[i] = orig_size + i;
MESG("data array re-initialized");
if(VERBOSE_MED) {
- MESG("writing at offset 10: ");
+ MESG("writing at offset 10: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
}
ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
@@ -1854,15 +1862,15 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Dread succeeded");
for (i=0; i<(int)orig_size; i++)
if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
i,written[i], i,retrieved[i]);
nerrors++;
}
if(VERBOSE_MED){
- MESG("read at offset 10: ");
+ MESG("read at offset 10: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
}
@@ -1879,22 +1887,22 @@ extend_writeInd2(void)
void
extend_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1902,7 +1910,7 @@ extend_readInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -1974,13 +1982,13 @@ extend_readInd(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2009,13 +2017,13 @@ extend_readInd(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2058,26 +2066,26 @@ void
extend_writeAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2085,7 +2093,7 @@ extend_writeAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -2138,7 +2146,7 @@ extend_writeAll(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2174,8 +2182,8 @@ extend_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -2207,7 +2215,7 @@ extend_writeAll(void)
/* write data collectively */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2226,8 +2234,8 @@ extend_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -2258,7 +2266,7 @@ extend_writeAll(void)
/* write data independently. Should fail. */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
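The expected failure above exercises writing past the dataset's current extent; once the dataset is enlarged, the same write goes through. A minimal sketch of that enlarge-then-write step, with names and sizes that are illustrative only and not taken from the test:

    /* Sketch only: grow an extendible (chunked) dataset, then refresh the
     * file dataspace before re-selecting the hyperslab. */
    hsize_t new_dims[RANK];
    new_dims[0] = (hsize_t)dim0;    /* enlarge to the full target size */
    new_dims[1] = (hsize_t)dim1;
    ret = H5Dset_extent(dataset2, new_dims);
    VRFY((ret >= 0), "H5Dset_extent succeeded");
    file_dataspace = H5Dget_space(dataset2);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");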
@@ -2279,7 +2287,7 @@ extend_writeAll(void)
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2308,23 +2316,23 @@ extend_writeAll(void)
void
extend_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2332,7 +2340,7 @@ extend_readAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -2404,8 +2412,8 @@ extend_readAll(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -2421,7 +2429,7 @@ extend_readAll(void)
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2451,8 +2459,8 @@ extend_readAll(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -2468,7 +2476,7 @@ extend_readAll(void)
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2505,27 +2513,27 @@ void
compress_readAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
+ hid_t acc_tpl; /* File access templates */
hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
int rank=1; /* Dataspace rank */
hsize_t dim=dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_read = NULL; /* data buffer */
DATATYPE *data_orig = NULL; /* expected data buffer */
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective chunked dataset read test on file %s\n", filename);
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
/* Retrieve MPI parameters */
MPI_Comm_size(comm,&mpi_size);
@@ -2646,7 +2654,7 @@ compress_readAll(void)
/* Verify data read */
for(u=0; u<dim; u++)
if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
(unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
nerrors++;
}
@@ -2687,26 +2695,26 @@ void
none_selection_chunk(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t mstart[RANK]; /* for data buffer in memory */
-
- herr_t ret; /* Generic return value */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_origin = NULL; /* data buffer */
+ DATATYPE *data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
+
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2714,7 +2722,7 @@ none_selection_chunk(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -2745,7 +2753,7 @@ none_selection_chunk(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2789,8 +2797,8 @@ none_selection_chunk(void)
dataset_fill(mstart, block, data_origin);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
}
}
@@ -2824,12 +2832,12 @@ none_selection_chunk(void)
/* write data collectively */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -2846,12 +2854,12 @@ none_selection_chunk(void)
/* write data collectively */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -2883,20 +2891,20 @@ none_selection_chunk(void)
if(data_array) HDfree(data_array);
}
-
+
/* Function: test_actual_io_mode
*
- * Purpose: tests one specific case of collective I/O and checks that the
+ * Purpose: tests one specific case of collective I/O and checks that the
* actual_chunk_opt_mode property and the actual_io_mode
* properties in the DXPL have the correct values.
*
* Input: selection_mode: changes the way processes select data from the space, as well
* as some dxpl flags to get collective I/O to break in different ways.
- *
+ *
* The relevant I/O function and expected response for each mode:
* TEST_ACTUAL_IO_MULTI_CHUNK_IND:
* H5D_mpi_chunk_collective_io, each process reports independent I/O
- *
+ *
* TEST_ACTUAL_IO_MULTI_CHUNK_COL:
* H5D_mpi_chunk_collective_io, each process reports collective I/O
*
@@ -2908,7 +2916,7 @@ none_selection_chunk(void)
* collective, the rest report independent I/O
*
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
- * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND.
+ * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND.
* Set directly go to multi-chunk-io without num threshold calc.
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
* Same test TEST_ACTUAL_IO_MULTI_CHUNK_COL.
@@ -2933,20 +2941,20 @@ none_selection_chunk(void)
*
* Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
* is not needed as they are covered by DIRECT_CHUNK_MIX and
- * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
+ * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
 * pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold.
*
* Modification:
- * - Refctore to remove multi-chunk-without-opimization test and update for
- * testing direct to multi-chunk-io
+ *     - Refactored to remove the multi-chunk-without-optimization test and update for
+ *       testing direct to multi-chunk-io
* Programmer: Jonathan Kim
* Date: 2012-10-10
*
- *
+ *
* Programmer: Jacob Gruber
* Date: 2011-04-06
*/
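Each case below shapes the selection so the library takes one particular collective-I/O path, then reads the two DXPL properties back after the transfer completes. The query pattern, sketched here with illustrative identifiers:

    /* Sketch only: the properties are filled in by the library during the
     * transfer, so they are queried from the DXPL after the I/O call. */
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;
    H5D_mpio_actual_io_mode_t        io_mode;
    ret = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, file_space, dxpl_write, buffer);
    VRFY((ret >= 0), "H5Dwrite succeeded");
    ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &opt_mode);
    VRFY((ret >= 0), "H5Pget_mpio_actual_chunk_opt_mode succeeded");
    ret = H5Pget_mpio_actual_io_mode(dxpl_write, &io_mode);
    VRFY((ret >= 0), "H5Pget_mpio_actual_io_mode succeeded");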
-static void
+static void
test_actual_io_mode(int selection_mode) {
H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
@@ -2960,7 +2968,7 @@ test_actual_io_mode(int selection_mode) {
hbool_t multi_chunk_io;
hbool_t is_chunked;
hbool_t is_collective;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -2985,12 +2993,12 @@ test_actual_io_mode(int selection_mode) {
hsize_t block[RANK];
char message[256];
herr_t ret;
-
+
/* Set up some flags to make some future if statements slightly more readable */
direct_multi_chunk_io = (
selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
-
+
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
@@ -3000,11 +3008,11 @@ test_actual_io_mode(int selection_mode) {
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
selection_mode == TEST_ACTUAL_IO_RESET );
-
+
is_chunked = (
selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
-
+
is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
/* Set up MPI parameters */
@@ -3012,7 +3020,7 @@ test_actual_io_mode(int selection_mode) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3029,7 +3037,7 @@ test_actual_io_mode(int selection_mode) {
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((fid >= 0), "H5Fcreate succeeded");
- /* Create the basic Space */
+ /* Create the basic Space */
dims[0] = dim0;
dims[1] = dim1;
sid = H5Screate_simple (RANK, dims, NULL);
@@ -3057,10 +3065,10 @@ test_actual_io_mode(int selection_mode) {
file_space = H5Dget_space(dataset);
VRFY((file_space >= 0), "H5Dget_space succeeded");
- /* Choose a selection method based on the type of I/O we want to occur,
+ /* Choose a selection method based on the type of I/O we want to occur,
 * and also set up some selection-dependent test info. */
switch(selection_mode) {
-
+
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
@@ -3069,7 +3077,7 @@ test_actual_io_mode(int selection_mode) {
* independent.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Multi Chunk - Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
@@ -3083,7 +3091,7 @@ test_actual_io_mode(int selection_mode) {
* selections to each chunk, the operation is purely collective.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
+
test_name = "Multi Chunk - Collective";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
if(mpi_size > 1)
@@ -3091,7 +3099,7 @@ test_actual_io_mode(int selection_mode) {
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
-
+
/* Mixed I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
/* A chunk will be assigned collective I/O only if it is selected by each
@@ -3102,7 +3110,7 @@ test_actual_io_mode(int selection_mode) {
* assigned independent I/O. Each process will access one chunk collectively
* and at least one chunk independently, reporting mixed I/O.
*/
-
+
if(mpi_rank == 0) {
/* Select the first column */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
@@ -3117,7 +3125,7 @@ test_actual_io_mode(int selection_mode) {
start[0] = 0;
start[1] = mpi_rank*block[1];
}
-
+
test_name = "Multi Chunk - Mixed";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
@@ -3127,7 +3135,7 @@ test_actual_io_mode(int selection_mode) {
 * performed. To achieve this, we have RESET perform collective I/O (which would change
* the values from the defaults) followed by independent I/O (which should report the
* default values). RESET doesn't need to have a unique selection, so we reuse
- * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
* on all builds. The independent section of RESET can be found at the end of this function.
*/
case TEST_ACTUAL_IO_RESET:
@@ -3136,7 +3144,7 @@ test_actual_io_mode(int selection_mode) {
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
/* A chunk will be assigned collective I/O only if it is selected by each
* process. To get mixed I/O with disagreement, assign process n to the
- * first chunk and the nth chunk. The first chunk, selected by all, is
+ * first chunk and the nth chunk. The first chunk, selected by all, is
 * assigned collective I/O, while each other process gets independent I/O.
 * Since the root process will only access the first chunk, it will report
* collective I/O. The subsequent processes will access the first chunk
@@ -3158,13 +3166,13 @@ test_actual_io_mode(int selection_mode) {
start[0] = 0;
start[1] = mpi_rank*block[1];
}
-
+
/* If the testname was not already set by the RESET case */
if (selection_mode == TEST_ACTUAL_IO_RESET)
test_name = "RESET";
else
test_name = "Multi Chunk - Mixed (Disagreement)";
-
+
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
if(mpi_size > 1) {
if(mpi_rank == 0)
@@ -3174,14 +3182,14 @@ test_actual_io_mode(int selection_mode) {
}
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
-
- break;
+
+ break;
/* Linked Chunk I/O */
- case TEST_ACTUAL_IO_LINK_CHUNK:
+ case TEST_ACTUAL_IO_LINK_CHUNK:
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Link Chunk";
actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
@@ -3192,7 +3200,7 @@ test_actual_io_mode(int selection_mode) {
/* A non overlapping, regular selection in a contiguous dataset leads to
* collective I/O */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Contiguous";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
@@ -3200,7 +3208,7 @@ test_actual_io_mode(int selection_mode) {
case TEST_ACTUAL_IO_NO_COLLECTIVE:
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
@@ -3215,13 +3223,13 @@ test_actual_io_mode(int selection_mode) {
ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
+
/* Create a memory dataspace mirroring the dataset and select the same hyperslab
- * as in the file space.
+ * as in the file space.
*/
mem_space = H5Screate_simple (RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
-
+
ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -3230,22 +3238,22 @@ test_actual_io_mode(int selection_mode) {
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
/* Set collective I/O properties in the dxpl. */
if(is_collective) {
/* Request collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Set the threshold number of processes per chunk to twice mpi_size.
- * This will prevent the threshold from ever being met, thus forcing
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
* multi chunk io instead of link chunk io.
 * This is via the default.
*/
@@ -3283,12 +3291,12 @@ test_actual_io_mode(int selection_mode) {
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Retreive Actual io values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
VRFY((ret >= 0), "retriving actual io mode succeeded" );
@@ -3304,9 +3312,9 @@ test_actual_io_mode(int selection_mode) {
/* Test values */
if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) {
- sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
} else {
HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
@@ -3338,7 +3346,7 @@ test_actual_io_mode(int selection_mode) {
"actual_chunk_opt_mode has correct value for reset write (independent)");
VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
"actual_io_mode has correct value for reset write (independent)");
-
+
/* Read */
ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
@@ -3348,7 +3356,7 @@ test_actual_io_mode(int selection_mode) {
VRFY( (ret >= 0), "retriving actual io mode succeeded" );
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset read (independent)");
VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
@@ -3370,10 +3378,10 @@ test_actual_io_mode(int selection_mode) {
return;
}
-
+
/* Function: actual_io_mode_tests
*
- * Purpose: Tests all possible cases of the actual_io_mode property.
+ * Purpose: Tests all possible cases of the actual_io_mode property.
*
* Programmer: Jacob Gruber
* Date: 2011-04-06
@@ -3384,10 +3392,10 @@ actual_io_mode_tests(void) {
int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
-
- /*
+
+ /*
* Test multi-chunk-io via proc_num threshold
*/
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
@@ -3398,10 +3406,10 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
else
HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
-
+
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- /*
+ /*
* Test multi-chunk-io via setting direct property
*/
test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
@@ -3409,31 +3417,31 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
-/*
+/*
* Function: test_no_collective_cause_mode
*
- * Purpose:
- * tests cases for broken collective I/O and checks that the
+ * Purpose:
+ * tests cases for broken collective I/O and checks that the
* H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
*
- * Input:
+ * Input:
* selection_mode: various mode to cause broken collective I/O
* Note: Originally, each TEST case is supposed to be used alone.
* After some discussion, this is updated to take multiple TEST cases
- * with '|'. However there is no error check for any of combined
+ * with '|'. However, there is no error check for any of the combined
* test cases, so a tester is responsible to understand and feed
* proper combination of TESTs if needed.
*
- *
+ *
* TEST_COLLECTIVE:
* Test for regular collective I/O without cause of breaking.
* Just to test normal behavior.
- *
+ *
* TEST_SET_INDEPENDENT:
* Test for Independent I/O as the cause of breaking collective I/O.
*
@@ -3445,7 +3453,7 @@ actual_io_mode_tests(void) {
*
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
- *
+ *
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
* Test for Compact layout as the cause of breaking collective I/O.
*
@@ -3454,17 +3462,17 @@ actual_io_mode_tests(void) {
*
* TEST_FILTERS:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead.
+ * Note: TEST_FILTERS mode will not work until H5Dcreate and H5Dwrite are supported for the mpio and filter feature. Use the test_no_collective_cause_mode_filter() function instead.
+ *
*
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
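Because the causes are reported as bit flags, a combined test checks them with bitwise operations after the transfer; a short sketch with illustrative identifiers:

    /* Sketch only: query why collective I/O was broken on this rank (local)
     * and across all ranks (global), then test individual cause bits. */
    uint32_t local_cause = 0, global_cause = 0;
    ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
    VRFY((ret >= 0), "H5Dwrite succeeded");
    ret = H5Pget_mpio_no_collective_cause(dxpl_write, &local_cause, &global_cause);
    VRFY((ret >= 0), "H5Pget_mpio_no_collective_cause succeeded");
    if (local_cause & H5D_MPIO_DATATYPE_CONVERSION) {
        /* this rank fell back to independent I/O because of datatype conversion */
    }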
#define DSET_NOCOLCAUSE "nocolcause"
#define NELM 2
#define FILE_EXTERNAL "nocolcause_extern.data"
-static void
-test_no_collective_cause_mode(int selection_mode)
+static void
+test_no_collective_cause_mode(int selection_mode)
{
uint32_t no_collective_cause_local_write = 0;
uint32_t no_collective_cause_local_read = 0;
@@ -3478,7 +3486,7 @@ test_no_collective_cause_mode(int selection_mode)
const char * test_name;
hbool_t is_chunked=1;
hbool_t is_independent=0;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -3502,7 +3510,7 @@ test_no_collective_cause_mode(int selection_mode)
H5Z_filter_t filter_info;
#endif /* LATER */
/* set to global value as default */
- int l_facc_type = facc_type;
+ int l_facc_type = facc_type;
char message[256];
/* Set up MPI parameters */
@@ -3564,7 +3572,7 @@ test_no_collective_cause_mode(int selection_mode)
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
}
-
+
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3589,20 +3597,19 @@ test_no_collective_cause_mode(int selection_mode)
/* Create the dataset */
- dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
- /*
- * Set expected causes and some tweaks based on the type of test
+ /*
+ * Set expected causes and some tweaks based on the type of test
*/
if (selection_mode & TEST_DATATYPE_CONVERSION) {
test_name = "Broken Collective I/O - Datatype Conversion";
no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
/* set different sign to trigger type conversion */
- data_type = H5T_NATIVE_UINT;
+ data_type = H5T_NATIVE_UINT;
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
@@ -3667,14 +3674,14 @@ test_no_collective_cause_mode(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
if(is_independent) {
/* Set Independent I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
@@ -3684,17 +3691,17 @@ test_no_collective_cause_mode(int selection_mode)
/* Set Collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
+
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform (dxpl_write, "x+1");
+ ret = H5Pset_data_transform (dxpl_write, "x+1");
VRFY((ret >= 0), "H5Pset_data_transform succeeded");
}
/*---------------------
* Test Write access
- *---------------------*/
+ *---------------------*/
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
@@ -3709,7 +3716,7 @@ test_no_collective_cause_mode(int selection_mode)
/*---------------------
* Test Read access
- *---------------------*/
+ *---------------------*/
/* Make a copy of the dxpl to test the read operation */
dxpl_read = H5Pcopy(dxpl_write);
@@ -3720,7 +3727,7 @@ test_no_collective_cause_mode(int selection_mode)
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
VRFY((ret >= 0), "retriving no collective cause succeeded" );
@@ -3730,13 +3737,13 @@ test_no_collective_cause_mode(int selection_mode)
"reading and writing are the same for local cause of Broken Collective I/O");
VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
"reading and writing are the same for global cause of Broken Collective I/O");
-
+
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3768,28 +3775,28 @@ test_no_collective_cause_mode(int selection_mode)
}
-/*
+/*
* Function: test_no_collective_cause_mode_filter
*
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
+ * Purpose:
+ * Test specifically for using a filter as a cause of broken collective I/O and
* checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
* have the correct values.
*
- * NOTE:
- * This is a temporary function.
+ * NOTE:
+ * This is a temporary function.
* test_no_collective_cause_mode(TEST_FILTERS) will replace this when
* H5Dcreate and H5write support for mpio and filter feature.
*
- * Input:
+ * Input:
* TEST_FILTERS_READ:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- *
+ *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
+static void
+test_no_collective_cause_mode_filter(int selection_mode)
{
uint32_t no_collective_cause_local_read = 0;
uint32_t no_collective_cause_local_expected = 0;
@@ -3799,7 +3806,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
const char * filename;
const char * test_name;
hbool_t is_chunked=1;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -3829,7 +3836,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3855,12 +3862,12 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
}
- /* Create the basic Space */
+ /* Create the basic Space */
dims[0] = dim0;
dims[1] = dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
+
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3883,8 +3890,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
#ifdef LATER /* fletcher32 */
@@ -3907,16 +3913,16 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
+    /* To test read in collective I/O mode, write in independent mode
* because write fails with mpio + filter */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3926,7 +3932,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
-
+
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
@@ -3967,17 +3973,17 @@ test_no_collective_cause_mode_filter(int selection_mode)
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
VRFY((ret >= 0), "retriving no collective cause succeeded" );
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
/* Release some resources */
@@ -4003,16 +4009,16 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Function: no_collective_cause_tests
*
- * Purpose: Tests cases for broken collective IO.
+ * Purpose: Tests cases for broken collective IO.
*
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-void
-no_collective_cause_tests(void)
+void
+no_collective_cause_tests(void)
{
- /*
- * Test individual cause
+ /*
+ * Test individual cause
*/
test_no_collective_cause_mode (TEST_COLLECTIVE);
test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
@@ -4022,15 +4028,15 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
* H5Dwrite is ready for mpio + filter feature.
*/
/* test_no_collective_cause_mode (TEST_FILTERS); */
test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
-#endif /* LATER */
+#endif /* LATER */
- /*
- * Test combined causes
+ /*
+ * Test combined causes
*/
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
@@ -4054,22 +4060,22 @@ void
dataset_atomicity(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
int buf_size;
hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
hsize_t start[RANK];
hsize_t stride[RANK];
hsize_t count[RANK];
hsize_t block[RANK];
const char *filename;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
int i, j, k;
hbool_t atomicity = FALSE;
@@ -4079,11 +4085,11 @@ dataset_atomicity(void)
dim0 = 64; dim1 = 32;
filename = GetTestParameters();
if (facc_type != FACC_MPIO) {
- printf("Atomicity tests will not work without the MPIO VFD\n");
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
return;
}
if(VERBOSE_MED)
- printf("atomic writes to file %s\n", filename);
+ HDprintf("atomic writes to file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -4134,7 +4140,7 @@ dataset_atomicity(void)
H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
-
+
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose succeeded");
ret = H5Dclose(dataset2);
@@ -4149,8 +4155,8 @@ dataset_atomicity(void)
/* make sure setting atomicity fails on a serial file ID */
/* file locking allows only one file open (serial) for writing */
if(MAINPROCESS){
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeed");
+ fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+        VRFY((fid >= 0), "H5Fopen succeeded");
}
/* should fail */
@@ -4158,8 +4164,8 @@ dataset_atomicity(void)
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
if(MAINPROCESS){
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
}
MPI_Barrier (comm);
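On the parallel file ID that the test uses afterwards, the same call is expected to succeed; the basic set/get round trip looks like this, sketched with illustrative identifiers:

    /* Sketch only: enable MPI atomicity on a file opened with the MPIO VFD
     * and read the flag back; the serial file ID above must reject the call. */
    hbool_t atomic = FALSE;
    ret = H5Fset_mpi_atomicity(fid, TRUE);
    VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
    ret = H5Fget_mpi_atomicity(fid, &atomic);
    VRFY((ret >= 0), "H5Fget_mpi_atomicity succeeded");
    VRFY((atomic == TRUE), "atomicity flag reads back as TRUE");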
@@ -4215,23 +4221,23 @@ dataset_atomicity(void)
if(VERBOSE_MED) {
i=0;j=0;k=0;
for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
+ HDprintf ("\n");
for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ HDprintf ("%d ", read_buf[k++]);
}
}
/* The processes that read the dataset must either read all values
as 0 (read happened before process 0 wrote to dataset 1), or 5
(read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare = read_buf[0];
- VRFY((compare == 0 || compare == 5),
+ VRFY((compare == 0 || compare == 5),
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
for (i=1; i<buf_size; i++) {
if (read_buf[i] != compare) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
nerrors ++;
}
}
@@ -4313,17 +4319,17 @@ dataset_atomicity(void)
if (mpi_rank == 1) {
i=0;j=0;k=0;
for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
+ HDprintf ("\n");
for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ HDprintf ("%d ", read_buf[k++]);
}
- printf ("\n");
+ HDprintf ("\n");
}
}
/* The processes that read the dataset must either read all values
as 5 (read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare;
i=0;j=0;k=0;
@@ -4347,7 +4353,7 @@ dataset_atomicity(void)
continue;
}
else if (compare != read_buf[k]) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
nerrors++;
}
k ++;
@@ -4378,8 +4384,8 @@ dataset_atomicity(void)
* Programmer: Quincey Koziol
* Date: April, 2013
*/
-void
-test_dense_attr(void)
+void
+test_dense_attr(void)
{
int mpi_size, mpi_rank;
hid_t fpid, fid;
@@ -4417,7 +4423,7 @@ test_dense_attr(void)
status = H5Pclose(gpid);
VRFY((status >= 0), "H5Pclose succeeded");
- atFileSpace = H5Screate_simple(1, atDims, NULL);
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
VRFY((atid > 0), "H5Acreate succeeded");
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index f436c8f..1f26e0d 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -32,9 +32,28 @@ char filenames[1][256];
int nerrors = 0;
size_t cur_filter_idx = 0;
+#define GZIP_INDEX 0
+#define FLETCHER32_INDEX 1
#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
+/*
+ * Used to check if a filter is available before running a test.
+ */
+#define CHECK_CUR_FILTER_AVAIL() \
+{ \
+ htri_t filter_is_avail; \
+ \
+ if (cur_filter_idx == GZIP_INDEX) { \
+ if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
+ if (MAINPROCESS) { \
+ HDputs(" - SKIPPED - Deflate filter not available"); \
+ } \
+ return; \
+ } \
+ } \
+}
+
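The macro only tests availability; a stricter check could also confirm that the filter supports both encoding and decoding before a write/read test runs. A sketch of that extended check, illustrative only and not used by the tests:

    /* Sketch only: confirm the deflate filter is present and usable for both
     * encoding (write) and decoding (read). */
    unsigned int filter_config = 0;
    if (H5Zfilter_avail(H5Z_FILTER_DEFLATE) == TRUE
            && H5Zget_filter_info(H5Z_FILTER_DEFLATE, &filter_config) >= 0
            && (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)
            && (filter_config & H5Z_FILTER_CONFIG_DECODE_ENABLED)) {
        /* safe to exercise the deflate-based tests */
    }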
static herr_t set_dcpl_filter(hid_t dcpl);
#if MPI_VERSION >= 3
@@ -144,9 +163,9 @@ static herr_t
set_dcpl_filter(hid_t dcpl)
{
switch (cur_filter_idx) {
- case 0:
+ case GZIP_INDEX:
return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
- case 1:
+ case FLETCHER32_INDEX:
return H5Pset_fletcher32(dcpl);
default:
return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
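set_dcpl_filter() is applied to a dataset creation property list that already has chunking enabled, since HDF5 filters only work on chunked storage; a sketch of that combination with an illustrative chunk shape:

    /* Sketch only: the chunk shape is set before the filter is added to the
     * DCPL; set_dcpl_filter() then picks deflate or fletcher32 per cur_filter_idx. */
    hid_t   plist_id = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t chunk_dims[2] = {4, 8};    /* illustrative chunk shape */
    VRFY((plist_id >= 0), "DCPL creation succeeded");
    VRFY((H5Pset_chunk(plist_id, 2, chunk_dims) >= 0), "Chunk size set");
    VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");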
@@ -178,7 +197,9 @@ test_write_one_chunk_filtered_dataset(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to one-chunk filtered dataset");
+ if (MAINPROCESS) HDputs("Testing write to one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -238,9 +259,9 @@ test_write_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -292,7 +313,7 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -333,7 +354,9 @@ test_write_filtered_dataset_no_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -394,9 +417,9 @@ test_write_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -450,7 +473,7 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -492,7 +515,9 @@ test_write_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -553,9 +578,9 @@ test_write_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -609,7 +634,7 @@ test_write_filtered_dataset_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -654,7 +679,9 @@ test_write_filtered_dataset_single_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -718,9 +745,9 @@ test_write_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -782,7 +809,7 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -823,7 +850,9 @@ test_write_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -912,7 +941,7 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -949,7 +978,9 @@ test_write_filtered_dataset_point_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with point selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1058,7 +1089,7 @@ test_write_filtered_dataset_point_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (coords) HDfree(coords);
@@ -1102,7 +1133,9 @@ test_write_filtered_dataset_interleaved_write(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing interleaved write to filtered chunks");
+ if (MAINPROCESS) HDputs("Testing interleaved write to filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1163,9 +1196,9 @@ test_write_filtered_dataset_interleaved_write(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1225,7 +1258,7 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1265,7 +1298,9 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1333,9 +1368,9 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t) mpi_rank;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1385,7 +1420,7 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1426,7 +1461,9 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
hid_t file_id, dset_id, plist_id;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1494,9 +1531,9 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1549,7 +1586,7 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1590,7 +1627,9 @@ test_write_3d_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1658,9 +1697,9 @@ test_write_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1722,7 +1761,7 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1762,7 +1801,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1834,9 +1875,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1902,7 +1943,7 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1943,7 +1984,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
hid_t file_id, dset_id, plist_id, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2015,9 +2058,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2086,7 +2129,7 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2132,7 +2175,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2215,9 +2260,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2269,7 +2314,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2316,7 +2361,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
hid_t file_id, dset_id, plist_id, filetype, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2399,9 +2446,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2453,7 +2500,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2503,6 +2550,10 @@ test_read_one_chunk_filtered_dataset(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
@@ -2517,8 +2568,6 @@ test_read_one_chunk_filtered_dataset(void)
+ ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
if (MAINPROCESS) {
- puts("Testing read from one-chunk filtered dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -2606,9 +2655,9 @@ test_read_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -2648,7 +2697,7 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
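In the read tests each rank reads only its own hyperslab into read_buf, so the pieces have to be reassembled before they can be compared against correct_buf; recvcounts holds every rank's element count and displs the offset of its slice in global_buf. A condensed sketch of that reassembly for the equal-sized, contiguous case (variable names follow the test, but the snippet is illustrative):

    /* Reassemble per-rank slices of C_DATATYPE elements, then verify. */
    int *recvcounts = HDcalloc((size_t) mpi_size, sizeof(int));
    int *displs     = HDcalloc((size_t) mpi_size, sizeof(int));

    for (int i = 0; i < mpi_size; i++) {
        recvcounts[i] = (int) flat_dims[0];        /* elements per rank     */
        displs[i]     = i * (int) flat_dims[0];    /* contiguous placement  */
    }

    MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI,
                   global_buf, recvcounts, displs, C_DATATYPE_MPI, comm);

    VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
         "Data verification succeeded");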
@@ -2698,6 +2747,10 @@ test_read_filtered_dataset_no_overlap(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
@@ -2714,8 +2767,6 @@ test_read_filtered_dataset_no_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -2803,9 +2854,9 @@ test_read_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -2845,7 +2896,7 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -2896,6 +2947,10 @@ test_read_filtered_dataset_overlap(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS;
@@ -2913,8 +2968,6 @@ test_read_filtered_dataset_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3002,9 +3055,9 @@ test_read_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -3059,7 +3112,7 @@ test_read_filtered_dataset_overlap(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3111,6 +3164,10 @@ test_read_filtered_dataset_single_no_selection(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3133,8 +3190,6 @@ test_read_filtered_dataset_single_no_selection(void)
0, segment_length * sizeof(*correct_buf));
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with a single process having no selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3225,9 +3280,9 @@ test_read_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
@@ -3275,7 +3330,7 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3319,6 +3374,10 @@ test_read_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3329,8 +3388,6 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((NULL != correct_buf), "HDcalloc succeeded");
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with all processes having no selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3460,6 +3517,10 @@ test_read_filtered_dataset_point_selection(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3477,8 +3538,6 @@ test_read_filtered_dataset_point_selection(void)
);
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with point selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3615,7 +3674,7 @@ test_read_filtered_dataset_point_selection(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3669,6 +3728,10 @@ test_read_filtered_dataset_interleaved_read(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing interleaved read from filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
@@ -3692,8 +3755,6 @@ test_read_filtered_dataset_interleaved_read(void)
);
if (MAINPROCESS) {
- puts("Testing interleaved read from filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3781,9 +3842,9 @@ test_read_filtered_dataset_interleaved_read(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -3838,7 +3899,7 @@ test_read_filtered_dataset_interleaved_read(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3889,6 +3950,10 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
@@ -3903,8 +3968,6 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks on separate pages in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3998,9 +4061,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t) mpi_rank;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4043,7 +4106,7 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
"MPI_Allgather succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
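The 3D "separate pages" case cannot rebuild global_buf with a plain gather because the ranks' elements must interleave along the depth dimension. The test therefore gathers into an MPI vector type whose extent has been shrunk to a single element, so rank r's contribution starts at offset r. Roughly, under the assumption of one element per page per rank:

    /* Sketch: rank r contributes elements r, r + mpi_size, r + 2*mpi_size, ... */
    MPI_Datatype vector_type, resized_vector_type;

    MPI_Type_vector((int) flat_dims[0],   /* number of blocks per rank  */
                    1,                    /* block length               */
                    mpi_size,             /* stride between blocks      */
                    C_DATATYPE_MPI, &vector_type);
    MPI_Type_commit(&vector_type);

    /* Shrink the extent so consecutive ranks start one element apart. */
    MPI_Type_create_resized(vector_type, 0, (MPI_Aint) sizeof(C_DATATYPE),
                            &resized_vector_type);
    MPI_Type_commit(&resized_vector_type);

    MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI,
                  global_buf, 1, resized_vector_type, comm);

    MPI_Type_free(&vector_type);
    MPI_Type_free(&resized_vector_type);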
@@ -4096,6 +4159,10 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
@@ -4113,8 +4180,6 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
);
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks on the same pages in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4208,9 +4273,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4250,7 +4315,7 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4302,6 +4367,10 @@ test_read_3d_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
@@ -4328,8 +4397,6 @@ test_read_3d_filtered_dataset_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4423,9 +4490,9 @@ test_read_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4473,7 +4540,7 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
@@ -4525,6 +4592,10 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
@@ -4563,8 +4634,6 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
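The compound-datatype cases build a memory datatype matching COMPOUND_C_DATATYPE with H5Tcreate/H5Tinsert before any I/O, which is where the "Datatype insertion succeeded" checks come from. A hedged sketch with hypothetical struct members and field names (the real layout is defined with the test):

    /* Hypothetical members; the real COMPOUND_C_DATATYPE differs. */
    typedef struct cmpd_sketch_t {
        short field1;
        int   field2;
        long  field3;
    } cmpd_sketch_t;

    hid_t memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_sketch_t));
    VRFY((memtype >= 0), "Datatype creation succeeded");

    VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_sketch_t, field1),
                    H5T_NATIVE_SHORT) >= 0), "Datatype insertion succeeded");
    VRFY((H5Tinsert(memtype, "IntData", HOFFSET(cmpd_sketch_t, field2),
                    H5T_NATIVE_INT) >= 0), "Datatype insertion succeeded");
    VRFY((H5Tinsert(memtype, "LongData", HOFFSET(cmpd_sketch_t, field3),
                    H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded");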
@@ -4652,9 +4721,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4694,7 +4763,7 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4746,6 +4815,10 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
@@ -4787,8 +4860,6 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4876,9 +4947,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4918,7 +4989,7 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4970,6 +5041,10 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
@@ -5019,8 +5094,6 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -5108,9 +5181,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -5150,7 +5223,7 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -5203,6 +5276,10 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
@@ -5255,8 +5332,6 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -5344,9 +5419,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -5386,7 +5461,7 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -5427,7 +5502,9 @@ test_write_serial_read_parallel(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1;
- if (MAINPROCESS) puts("Testing write file serially; read file in parallel");
+ if (MAINPROCESS) HDputs("Testing write file serially; read file in parallel");
+
+ CHECK_CUR_FILTER_AVAIL();
dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS;
dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS;
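test_write_serial_read_parallel has rank 0 create and write the filtered dataset through an ordinary (non-MPI) file access list and then has every rank reopen the file for a parallel read, which is why the read-back below goes through a transfer property list. A condensed sketch of that flow; the file name and the elided dataset handling are illustrative:

    /* Sketch: serial write by rank 0, then collective parallel read-back. */
    if (MAINPROCESS) {
        hid_t fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        /* ... create the chunked, filtered dataset and H5Dwrite() it ... */
        H5Fclose(fid);
    }
    MPI_Barrier(comm);   /* readers must not open the file before it exists */

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, comm, info);
    hid_t fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    /* H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl, read_buf); */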
@@ -5527,7 +5604,7 @@ test_write_serial_read_parallel(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -5568,7 +5645,9 @@ test_write_parallel_read_serial(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write file in parallel; read serially");
+ if (MAINPROCESS) HDputs("Testing write file in parallel; read serially");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5636,9 +5715,9 @@ test_write_parallel_read_serial(void)
offset[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -5707,7 +5786,7 @@ test_write_parallel_read_serial(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
@@ -5741,7 +5820,9 @@ test_shrinking_growing_chunks(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing continually shrinking/growing chunks");
+ if (MAINPROCESS) HDputs("Testing continually shrinking/growing chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5803,9 +5884,9 @@ test_shrinking_growing_chunks(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -5868,8 +5949,8 @@ main(int argc, char** argv)
if (mpi_size <= 0) {
if (MAINPROCESS) {
- printf("The Parallel Filters tests require at least 1 rank.\n");
- printf("Quitting...\n");
+ HDprintf("The Parallel Filters tests require at least 1 rank.\n");
+ HDprintf("Quitting...\n");
}
MPI_Abort(MPI_COMM_WORLD, 1);
@@ -5877,16 +5958,16 @@ main(int argc, char** argv)
if (H5dont_atexit() < 0) {
if (MAINPROCESS) {
- printf("Failed to turn off atexit processing. Continue.\n");
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
}
}
H5open();
if (MAINPROCESS) {
- printf("==========================\n");
- printf("Parallel Filters tests\n");
- printf("==========================\n\n");
+ HDprintf("==========================\n");
+ HDprintf("Parallel Filters tests\n");
+ HDprintf("==========================\n\n");
}
if (VERBOSE_MED) h5_show_hostname();
@@ -5942,9 +6023,9 @@ main(int argc, char** argv)
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
if (MAINPROCESS) {
- printf("\n=================================================================\n");
- printf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
- printf("=================================================================\n\n");
+ HDprintf("\n=================================================================\n");
+ HDprintf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
+ HDprintf("=================================================================\n\n");
}
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -5959,12 +6040,12 @@ main(int argc, char** argv)
if (nerrors) goto exit;
- if (MAINPROCESS) puts("All Parallel Filters tests passed\n");
+ if (MAINPROCESS) HDputs("All Parallel Filters tests passed\n");
exit:
if (nerrors)
if (MAINPROCESS)
- printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
+ HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
nerrors > 1 ? "S" : "");
ALARM_OFF;
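main() drives everything above from a table of test functions: it runs the table once with the default filter, prints the banner added in this hunk, switches the active filter to Fletcher32, and runs the same table again, so every case is exercised with both a compressing and a checksum filter. In outline, with the filter-selection variable shown under an illustrative name:

    /* Sketch of the driver; `cur_filter` stands in for however the tests
     * pick the filter to apply when creating their datasets. */
    static void (*tests[])(void) = {
        test_read_one_chunk_filtered_dataset,
        test_write_serial_read_parallel,
        /* ... remaining cases ... */
    };

    for (size_t i = 0; i < ARRAY_SIZE(tests); i++)
        (*tests[i])();                        /* first pass: default filter */

    cur_filter = H5Z_FILTER_FLETCHER32;       /* second pass: checksum      */
    for (size_t i = 0; i < ARRAY_SIZE(tests); i++)
        (*tests[i])();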
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 19ccf56..0905d44 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -699,18 +699,11 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
* with the opening and validation of the data contained
* therein.
*
- * WARNING: This test uses fork() and execve(), and
- * therefore will not run on Windows.
- *
* Return: Success: 0
- *
* Failure: 1
*
* Programmer: Richard Warren
* 10/1/17
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 87d9056..69b66ae 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -547,6 +547,9 @@ int main(int argc, char **argv)
AddTest("denseattr", test_dense_attr, NULL,
"Store Dense Attributes", PARATESTFILE);
+ AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
+ "Collective Metadata read with some ranks having no selection", PARATESTFILE);
+
/* Display testing information */
TestInfo(argv[0]);
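The new "noselcollmdread" entry registers test_partial_no_selection_coll_md_read from the t_coll_md_read.c file added by this change; the scenario it targets is a collective read in which metadata operations are collective but one or more ranks supply an empty selection. The sketch below only suggests the shape such a test takes: the property-list and dataspace calls are standard HDF5 APIs, everything else is illustrative:

    /* Sketch: collective metadata ops with one rank selecting nothing. */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_all_coll_metadata_ops(fapl, TRUE);   /* collective metadata reads  */
    H5Pset_coll_metadata_write(fapl, TRUE);     /* collective metadata writes */

    /* ... open the file and dataset with this FAPL, then: ... */
    if (mpi_rank == 0)
        H5Sselect_none(filespace);              /* this rank reads nothing */
    else
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
                            start, stride, count, block);

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    /* H5Dread(dset_id, H5T_NATIVE_INT, memspace, filespace, dxpl, buf); */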
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 322cb9b..176574e 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -294,6 +294,7 @@ void file_image_daisy_chain_test(void);
void compress_readAll(void);
#endif /* H5_HAVE_FILTER_DEFLATE */
void test_dense_attr(void);
+void test_partial_no_selection_coll_md_read(void);
/* commonly used prototypes */
hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);