author      Allen Byrne <byrn@hdfgroup.org>    2020-09-30 14:27:10 (GMT)
committer   Allen Byrne <byrn@hdfgroup.org>    2020-09-30 14:27:10 (GMT)
commit      b2d661b508a7fc7a2592c13bc6bdc175551f075d (patch)
tree        13baeb0d83a7c2a4c6299993c182b1227c2f6114 /testpar
parent      29ab58b58dce556639ea3154e262895773a8a8df (diff)
download    hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.zip
            hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.tar.gz
            hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.tar.bz2
Clang-format of source files
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_2Gio.c              2758
-rw-r--r--  testpar/t_bigio.c             1801
-rw-r--r--  testpar/t_cache.c             5147
-rw-r--r--  testpar/t_cache_image.c       2460
-rw-r--r--  testpar/t_chunk_alloc.c        243
-rw-r--r--  testpar/t_coll_chunk.c        1191
-rw-r--r--  testpar/t_coll_md_read.c       211
-rw-r--r--  testpar/t_dset.c              2481
-rw-r--r--  testpar/t_file.c               474
-rw-r--r--  testpar/t_file_image.c         285
-rw-r--r--  testpar/t_filter_read.c        406
-rw-r--r--  testpar/t_filters_parallel.c  3533
-rw-r--r--  testpar/t_filters_parallel.h   301
-rw-r--r--  testpar/t_init_term.c           31
-rw-r--r--  testpar/t_mdset.c             1675
-rw-r--r--  testpar/t_mpi.c                618
-rw-r--r--  testpar/t_pflush1.c            105
-rw-r--r--  testpar/t_pflush2.c            115
-rw-r--r--  testpar/t_ph5basic.c            28
-rw-r--r--  testpar/t_pread.c             1105
-rw-r--r--  testpar/t_prestart.c            89
-rw-r--r--  testpar/t_prop.c               179
-rw-r--r--  testpar/t_pshutdown.c           76
-rw-r--r--  testpar/t_shapesame.c         3309
-rw-r--r--  testpar/t_span_tree.c         2661
-rw-r--r--  testpar/testpar.h               97
-rw-r--r--  testpar/testphdf5.c            514
-rw-r--r--  testpar/testphdf5.h            297
28 files changed, 15066 insertions, 17124 deletions
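
The hunks below are mechanical reformatting only; no logic changes. As a rough illustration (a hypothetical two-line fragment, not taken verbatim from the HDF5 sources), the style applied here pads declarations, initializers, and trailing comments into aligned columns and places the pointer '*' in that padded column:

    /* before clang-format */
    int nerrors = 0; /* errors count */
    void *old_client_data; /* previous error handler arg. */

    /* after clang-format */
    int    nerrors = 0;      /* errors count */
    void * old_client_data;  /* previous error handler arg. */
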
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index d44c7c7..3e92f18 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -32,7 +32,6 @@
#include "mpi.h"
-
/* For this test, we don't want to inherit the RANK definition
* from testphdf5.h. We'll define MAX_RANK to accomodate 3D arrays
* and use that definition rather than RANK.
@@ -57,8 +56,8 @@
#endif
#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
/* global variables */
int dim0;
@@ -66,25 +65,22 @@ int dim1;
int dim2;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
-int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void * old_client_data; /* previous error handler arg.*/
-#define NFILENAME 3
+#define NFILENAME 3
#define PARATESTFILE filenames[0]
-const char *FILENAME[NFILENAME]={
- "ParaTest",
- "Hugefile",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
-MPI_Comm test_comm = MPI_COMM_WORLD;
+const char *FILENAME[NFILENAME] = {"ParaTest", "Hugefile", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+MPI_Comm test_comm = MPI_COMM_WORLD;
// static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */
// static const char *TestProgName = NULL;
@@ -95,7 +91,6 @@ MPI_Comm test_comm = MPI_COMM_WORLD;
* The following are various utility routines used by the tests.
*/
-
/*
* Show command usage
*/
@@ -103,15 +98,15 @@ static void
usage(void)
{
HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
HDprintf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
+ "\tset number of datasets for the multiple dataset test\n");
HDprintf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
+ "\tset number of groups for the multiple group test\n");
HDprintf("\t-f <prefix>\tfilename prefix\n");
HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
- HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- BIG_X_FACTOR, BIG_Y_FACTOR);
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", BIG_X_FACTOR,
+ BIG_Y_FACTOR);
HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
HDprintf("\n");
}
@@ -122,124 +117,127 @@ usage(void)
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(test_comm, &mpi_size);
MPI_Comm_rank(test_comm, &mpi_rank);
/* setup default chunk-size. Make sure sizes are > 0 */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
- while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
+ while (--argc) {
+ if (**(++argv) != '-') {
break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'f':
+ if (--argc < 1) {
+ nerrors++;
+ return (1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return (1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimensizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: HDprintf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
+ return (1);
}
}
} /*while*/
/* check validity of dimension and chunk sizes */
- if (dim0 <= 0 || dim1 <= 0){
+ if (dim0 <= 0 || dim1 <= 0) {
HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
nerrors++;
- return(1);
+ return (1);
}
- if (chunkdim0 <= 0 || chunkdim1 <= 0){
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
nerrors++;
- return(1);
+ return (1);
}
/* Make sure datasets can be divided into equal portions by the processes */
- if ((dim0 % mpi_size) || (dim1 % mpi_size)){
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
if (MAINPROCESS)
- HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
nerrors++;
- return(1);
+ return (1);
}
/* compose the test filenames */
{
int i, n;
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
+ for (i = 0; i < n; i++)
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], sizeof(filenames[i])) == NULL) {
HDprintf("h5_fixname failed\n");
nerrors++;
- return(1);
+ return (1);
}
if (MAINPROCESS) {
- HDprintf("Test filenames are:\n");
- for (i=0; i < n; i++)
- HDprintf(" %s\n", filenames[i]);
- }
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
}
- return(0);
+ return (0);
}
/*
@@ -248,20 +246,20 @@ parse_options(int argc, char **argv)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(test_comm, &mpi_rank);
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
return (ret_pl);
- if (l_facc_type == FACC_MPIO){
+ if (l_facc_type == FACC_MPIO) {
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
@@ -269,33 +267,32 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
VRFY((ret >= 0), "");
- return(ret_pl);
+ return (ret_pl);
}
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
hid_t mpio_pl;
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((mpio_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
VRFY((ret >= 0), "");
/* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((ret_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
H5Pclose(mpio_pl);
- return(ret_pl);
+ return (ret_pl);
}
/* unknown file access types */
return (ret_pl);
}
-
/*
* Setup the dimensions of the hyperslab.
* Two modes--by rows or by columns.
@@ -306,227 +303,222 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
* ZCOL same as BYCOL except process 0 gets 0 columns
*/
static void
-slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
- hsize_t stride[], hsize_t block[], int mode)
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
{
switch (mode) {
- case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = (hsize_t)dim0 / (hsize_t)mpi_size;
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set BYROW\n");
- break;
- case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)dim1 / (hsize_t)mpi_size;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank * block[1];
- if (VERBOSE_MED)
- HDprintf("slab_set BYCOL\n");
- break;
- case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
- block[1] = (hsize_t)dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set ZROW\n");
- break;
- case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
- if (VERBOSE_MED)
- HDprintf("slab_set ZCOL\n");
- break;
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- HDprintf("unknown slab_set mode (%d)\n", mode);
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set wholeset\n");
- break;
+ case BYROW:
+ /* Each process takes a slabs of rows. */
+ block[0] = (hsize_t)dim0 / (hsize_t)mpi_size;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1 / (hsize_t)mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+ /* Similar to BYROW except process 0 gets 0 row */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
}
if (VERBOSE_MED) {
- HDprintf(
- "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long) start[0], (unsigned long) start[1],
- (unsigned long) count[0], (unsigned long) count[1],
- (unsigned long) stride[0], (unsigned long) stride[1],
- (unsigned long) block[0], (unsigned long) block[1],
- (unsigned long) (block[0] * block[1] * count[0] * count[1]));
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
}
}
/*
* Setup the coordinates for point selection.
*/
-void point_set(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- size_t num_points,
- hsize_t coords[],
- int order)
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
{
- hsize_t i,j, k = 0, m ,n, s1 ,s2;
+ hsize_t i, j, k = 0, m, n, s1, s2;
// HDcompile_assert(MAX_RANK == 3);
HDcompile_assert(MAX_RANK == 2);
- if(OUT_OF_ORDER == order)
+ if (OUT_OF_ORDER == order)
k = (num_points * MAX_RANK) - 1;
- else if(IN_ORDER == order)
+ else if (IN_ORDER == order)
k = 0;
s1 = start[0];
s2 = start[1];
- for(i = 0 ; i < count[0]; i++)
- for(j = 0 ; j < count[1]; j++)
- for(m = 0 ; m < block[0]; m++)
- for(n = 0 ; n < block[1]; n++)
- if(OUT_OF_ORDER == order) {
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
coords[k--] = s2 + (stride[1] * j) + n;
coords[k--] = s1 + (stride[0] * i) + m;
}
- else if(IN_ORDER == order) {
+ else if (IN_ORDER == order) {
coords[k++] = s1 + stride[0] * i + m;
coords[k++] = s2 + stride[1] * j + n;
}
- if(VERBOSE_MED) {
- HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
- for(i = 0; i < num_points ; i++) {
+ for (i = 0; i < num_points; i++) {
HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
}
-
/*
* Fill the dataset with trivial data for testing.
* Assume dimension rank is 2 and data is stored contiguous.
*/
static void
-dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* put some trivial data in the data_array */
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
}
}
-
/*
* Print the content of the dataset.
*/
static void
-dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
HDprintf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
- HDprintf("%3lu ", (unsigned long)(start[1]+j));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
}
HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- HDprintf("%03d ", *dataptr++);
- }
- HDprintf("\n");
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
-
/*
* Print the content of the dataset.
*/
int
-dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original)
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original)
{
hsize_t i, j;
- int vrfyerrs;
+ int vrfyerrs;
/* print it if VERBOSE_MED */
- if(VERBOSE_MED) {
- HDprintf("dataset_vrfy dumping:::\n");
- HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- HDprintf("original values:\n");
- dataset_print(start, block, original);
- HDprintf("compared values:\n");
- dataset_print(start, block, dataset);
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
- *(original), *(dataset));
- }
- dataset++;
- original++;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+ (unsigned long)(j + start[1]), *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
}
}
- }
- if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
- if(vrfyerrs)
- HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
- return(vrfyerrs);
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
}
/* NOTE: This is a memory intensive test and is only run
@@ -549,26 +541,27 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
* -n 1 ./h5_mpi_big_dataset.x 1024 1024 1024
*/
-#define H5FILE_NAME "hugefile.h5"
-#define DATASETNAME "dataset"
+#define H5FILE_NAME "hugefile.h5"
+#define DATASETNAME "dataset"
-static int MpioTest2G( MPI_Comm comm )
+static int
+MpioTest2G(MPI_Comm comm)
{
/*
* HDF5 APIs definitions
*/
- herr_t status;
- hid_t file_id, dset_id; /* file and dataset identifiers */
- hid_t plist_id; /* property list identifier */
- hid_t filespace; /* file and memory dataspace identifiers */
- int *data; /* pointer to data buffer to write */
- size_t tot_size_bytes;
- hid_t dcpl_id;
- hid_t memorydataspace;
- hid_t filedataspace;
- size_t slice_per_process;
- size_t data_size;
- size_t data_size_bytes;
+ herr_t status;
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t plist_id; /* property list identifier */
+ hid_t filespace; /* file and memory dataspace identifiers */
+ int * data; /* pointer to data buffer to write */
+ size_t tot_size_bytes;
+ hid_t dcpl_id;
+ hid_t memorydataspace;
+ hid_t filedataspace;
+ size_t slice_per_process;
+ size_t data_size;
+ size_t data_size_bytes;
hsize_t chunk[3];
hsize_t h5_counts[3];
@@ -578,25 +571,25 @@ static int MpioTest2G( MPI_Comm comm )
/*
* MPI variables
*/
- int mpi_size, mpi_rank;
- MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(mpi_rank == 0) {
+ if (mpi_rank == 0) {
HDprintf("Using %d process on dataset shape "
- "[%" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE "]\n",
- mpi_size, shape[0], shape[1], shape[2]);
+ "[%" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE "]\n",
+ mpi_size, shape[0], shape[1], shape[2]);
}
/*
* Set up file access property list with parallel I/O access
*/
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "H5Pcreate file_access succeeded");
- status = H5Pset_fapl_mpio(plist_id, comm, info);
- VRFY((status >= 0), "H5Pset_dxpl_mpio succeeded");
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "H5Pcreate file_access succeeded");
+ status = H5Pset_fapl_mpio(plist_id, comm, info);
+ VRFY((status >= 0), "H5Pset_dxpl_mpio succeeded");
/*
* Create a new file collectively and release property list identifier.
@@ -611,10 +604,10 @@ static int MpioTest2G( MPI_Comm comm )
*/
tot_size_bytes = sizeof(int);
for (int i = 0; i < 3; i++) {
- tot_size_bytes *= shape[i];
+ tot_size_bytes *= shape[i];
}
- if(mpi_rank == 0) {
- HDprintf("Dataset of %zu bytes\n", tot_size_bytes);
+ if (mpi_rank == 0) {
+ HDprintf("Dataset of %zu bytes\n", tot_size_bytes);
}
filespace = H5Screate_simple(3, shape, NULL);
VRFY((filespace >= 0), "H5Screate_simple succeeded");
@@ -622,20 +615,18 @@ static int MpioTest2G( MPI_Comm comm )
/*
* Select chunking
*/
- dcpl_id = H5Pcreate (H5P_DATASET_CREATE);
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5P_DATASET_CREATE");
chunk[0] = 4;
chunk[1] = shape[1];
chunk[2] = shape[2];
- status = H5Pset_chunk(dcpl_id, 3, chunk);
+ status = H5Pset_chunk(dcpl_id, 3, chunk);
VRFY((status >= 0), "H5Pset_chunk succeeded");
/*
* Create the dataset with default properties and close filespace.
*/
- dset_id = H5Dcreate2(file_id, DATASETNAME,
- H5T_NATIVE_INT, filespace,
- H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
H5Sclose(filespace);
@@ -647,19 +638,20 @@ static int MpioTest2G( MPI_Comm comm )
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
VRFY((status >= 0), "");
- H5_CHECKED_ASSIGN(slice_per_process, size_t, (shape[0] + (hsize_t)mpi_size - 1) / (hsize_t)mpi_size, hsize_t);
- data_size = slice_per_process * shape[1] * shape[2];
+ H5_CHECKED_ASSIGN(slice_per_process, size_t, (shape[0] + (hsize_t)mpi_size - 1) / (hsize_t)mpi_size,
+ hsize_t);
+ data_size = slice_per_process * shape[1] * shape[2];
data_size_bytes = sizeof(int) * data_size;
- data = HDmalloc(data_size_bytes);
+ data = HDmalloc(data_size_bytes);
VRFY((data != NULL), "data HDmalloc succeeded");
for (size_t i = 0; i < data_size; i++) {
data[i] = mpi_rank;
}
- h5_counts[0] = slice_per_process;
- h5_counts[1] = shape[1];
- h5_counts[2] = shape[2];
+ h5_counts[0] = slice_per_process;
+ h5_counts[1] = shape[1];
+ h5_counts[2] = shape[2];
h5_offsets[0] = (size_t)mpi_rank * slice_per_process;
h5_offsets[1] = 0;
h5_offsets[2] = 0;
@@ -667,19 +659,17 @@ static int MpioTest2G( MPI_Comm comm )
VRFY((filedataspace >= 0), "H5Screate_simple succeeded");
// fix reminder along first dimension multiple of chunk[0]
- if ( h5_offsets[0] + h5_counts[0] > shape[0]) {
- h5_counts[0] = shape[0] - h5_offsets[0];
+ if (h5_offsets[0] + h5_counts[0] > shape[0]) {
+ h5_counts[0] = shape[0] - h5_offsets[0];
}
- status = H5Sselect_hyperslab(filedataspace, H5S_SELECT_SET,
- h5_offsets, NULL, h5_counts, NULL);
+ status = H5Sselect_hyperslab(filedataspace, H5S_SELECT_SET, h5_offsets, NULL, h5_counts, NULL);
VRFY((status >= 0), "H5Sselect_hyperslab succeeded");
memorydataspace = H5Screate_simple(3, h5_counts, NULL);
VRFY((memorydataspace >= 0), "H5Screate_simple succeeded");
- status = H5Dwrite(dset_id, H5T_NATIVE_INT,
- memorydataspace, filedataspace, plist_id, data);
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memorydataspace, filedataspace, plist_id, data);
VRFY((status >= 0), "H5Dwrite succeeded");
H5Pclose(plist_id);
@@ -695,11 +685,10 @@ static int MpioTest2G( MPI_Comm comm )
HDprintf("Proc %d - MpioTest2G test succeeded\n", mpi_rank);
if (mpi_rank == 0)
- HDremove(FILENAME[1]);
+ HDremove(FILENAME[1]);
return 0;
}
-
/*
* Part 1.a--Independent read/write for fixed dimension datasets.
*/
@@ -715,35 +704,37 @@ static int MpioTest2G( MPI_Comm comm )
void
dataset_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */
- hsize_t data_size;
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[MAX_RANK] = {
+ 1,
+ }; /* dataset dim sizes */
+ hsize_t data_size;
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
hsize_t count[MAX_RANK];
- hsize_t stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* allocate memory for data buffer */
data_size = sizeof(DATATYPE);
@@ -766,7 +757,6 @@ dataset_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* ---------------------------------------------
* Define the dimensions of the overall datasets
* and the slabs local to the MPI process.
@@ -774,21 +764,17 @@ dataset_writeInd(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
/*
* To test the independent orders of writes between processes, all
* even number processes write to dataset1 first, then dataset2.
@@ -803,43 +789,40 @@ dataset_writeInd(void)
MESG("data_array initialized");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to write with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeInd by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeInd by some with zero row");
-if((mpi_rank/2)*2 != mpi_rank){
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
-}
+ if ((mpi_rank / 2) * 2 != mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+ }
#ifdef BARRIER_CHECKS
-MPI_Barrier(test_comm);
+ MPI_Barrier(test_comm);
#endif /* BARRIER_CHECKS */
/* release dataspace ID */
@@ -858,44 +841,45 @@ MPI_Barrier(test_comm);
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/* Example of using the parallel HDF5 library to read a dataset */
void
dataset_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* setup file access template */
@@ -918,40 +902,39 @@ dataset_readInd(void)
dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
VRFY((dataset2 >= 0), "");
-
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* close dataset collectively */
ret = H5Dclose(dataset1);
@@ -966,11 +949,12 @@ dataset_readInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
-
/*
* Part 1.b--Collective read/write for fixed dimension datasets.
*/
@@ -987,49 +971,51 @@ dataset_readInd(void)
void
dataset_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
- hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[MAX_RANK] = {
+ 1,
+ }; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
hsize_t count[MAX_RANK];
- hsize_t stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* set up the coords array selection */
num_points = (size_t)dim1;
- coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)MAX_RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)MAX_RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1047,7 +1033,6 @@ dataset_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------
* Define the dimensions of the overall datasets
* and create the dataset
@@ -1055,17 +1040,16 @@ dataset_writeAll(void)
/* setup 2-D dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
datatype = H5Tcopy(H5T_NATIVE_INT);
- ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
VRFY((ret >= 0), "H5Tset_order succeeded");
dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -1104,54 +1088,51 @@ dataset_writeAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll by Row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* setup dimensions again to writeAll with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
/* release all temporary handles. */
@@ -1167,59 +1148,56 @@ dataset_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to writeAll with zero columns for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero col");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
/* release all temporary handles. */
@@ -1229,16 +1207,15 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset3);
+ file_dataspace = H5Dget_space(dataset3);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
@@ -1246,42 +1223,39 @@ dataset_writeAll(void)
} /* end else */
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* release all temporary handles. */
@@ -1295,11 +1269,11 @@ dataset_writeAll(void)
/* Additionally, these are in a scalar dataspace */
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset4);
+ file_dataspace = H5Dget_space(dataset4);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(file_dataspace);
@@ -1309,9 +1283,9 @@ dataset_writeAll(void)
/* create a memory dataspace independently */
mem_dataspace = H5Screate(H5S_SCALAR);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(mem_dataspace);
@@ -1321,31 +1295,29 @@ dataset_writeAll(void)
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* release all temporary handles. */
@@ -1353,55 +1325,54 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- block[0] = 1;
- block[1] = (hsize_t)dim1;
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* Dataset5: point selection in File - Hyperslab selection in Memory*/
/* create a file dataspace independently */
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset5);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset5);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- start[0] = 0;
- start[1] = 0;
- mem_dataspace = H5Dget_space (dataset5);
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
/* release all temporary handles. */
@@ -1411,35 +1382,34 @@ dataset_writeAll(void)
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
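Datasets 5 and 6 exercise element (point) selections; point_set() is a helper local to this test that fills the flat coords array. The underlying API only needs hsize_t coordinates laid out point by point, num_points * rank values in total. A hedged sketch of selecting one full row of a 2-D dataspace by points (select_row_points is an illustrative name, not test code):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Select every element of row `row` in a 2-D dataspace as a point
     * selection; coords is laid out as {i0, j0, i1, j1, ...}. */
    static herr_t
    select_row_points(hid_t space, hsize_t row, hsize_t ncols)
    {
        hsize_t *coords = (hsize_t *)malloc((size_t)ncols * 2 * sizeof(hsize_t));
        herr_t   ret;
        hsize_t  j;

        if (coords == NULL)
            return -1;
        for (j = 0; j < ncols; j++) {
            coords[2 * j]     = row; /* dimension 0 index */
            coords[2 * j + 1] = j;   /* dimension 1 index */
        }
        ret = H5Sselect_elements(space, H5S_SELECT_SET, (size_t)ncols, coords);
        free(coords);
        return ret;
    }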
/* release all temporary handles. */
@@ -1449,34 +1419,33 @@ dataset_writeAll(void)
/* Dataset7: point selection in File - All selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset7);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset7);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- current_dims = num_points;
- mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
ret = H5Sselect_all(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
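For dataset7 the file side is a point selection while the memory side is simply the whole of a 1-D dataspace whose single dimension equals the number of points; H5Dwrite only requires the two selections to contain the same number of elements, not to share a rank. A small sketch of that pairing (write_points_flat is an illustrative name; filespace is assumed to already carry the point selection):

    /* Write a flat buffer of npoints ints against an existing point
     * selection in `filespace`; memory and file ranks differ, only the
     * element counts have to match. */
    static herr_t
    write_points_flat(hid_t dset, hid_t filespace, hid_t dxpl, size_t npoints, const int *buf)
    {
        hsize_t dim      = (hsize_t)npoints;
        hid_t   memspace = H5Screate_simple(1, &dim, NULL);
        herr_t  ret;

        if (memspace < 0)
            return -1;
        H5Sselect_all(memspace); /* a fresh simple space is already all-selected; kept for clarity */
        ret = H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf);
        H5Sclose(memspace);
        return ret;
    }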
/* release all temporary handles. */
@@ -1506,8 +1475,10 @@ dataset_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
}
/*
@@ -1522,47 +1493,47 @@ dataset_writeAll(void)
void
dataset_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- int i,j,k;
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ int i, j, k;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* set up the coords array selection */
num_points = (size_t)dim1;
- coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * MAX_RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * MAX_RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1573,14 +1544,13 @@ dataset_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
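create_faccess_plist() is a helper shared across these parallel tests; for the FACC_MPIO case it presumably reduces to the standard MPI-IO file-access setup sketched below (an illustration, not the helper's actual body; open_parallel_readonly is an invented name):

    #include "mpi.h"
    #include "hdf5.h"

    /* Open an existing file read-only through the MPI-IO virtual file driver. */
    static hid_t
    open_parallel_readonly(const char *name, MPI_Comm comm, MPI_Info info)
    {
        hid_t fapl, fid;

        fapl = H5Pcreate(H5P_FILE_ACCESS);
        if (fapl < 0)
            return -1;
        if (H5Pset_fapl_mpio(fapl, comm, info) < 0) { /* select the MPI-IO driver */
            H5Pclose(fapl);
            return -1;
        }
        fid = H5Fopen(name, H5F_ACC_RDONLY, fapl); /* collective across `comm` */
        H5Pclose(fapl);                            /* the open file keeps its own copy */
        return fid;
    }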
-
/* --------------------------
* Open the datasets in it
* ------------------------- */
@@ -1608,62 +1578,61 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero columns for process 0 */
- if(VERBOSE_MED)
- HDprintf("readAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero col");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
/* Could have used them for dataset2 but it is cleaner */
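The zero-column (ZCOL) pass above, and the matching zero-row pass on dataset2 below, reshape process 0's hyperslab so that a rank with nothing to transfer still issues the collective H5Dread; with H5FD_MPIO_COLLECTIVE every rank has to make the call. An equivalent, slightly more explicit way to say "this rank transfers nothing" is an empty selection on both dataspaces (a sketch of the alternative, not what the test itself does; dset, memspace, filespace, dxpl, buf and my_elem_count are illustrative names):

    /* Rank contributes no elements but must still join the collective call. */
    if (my_elem_count == 0) {
        H5Sselect_none(filespace); /* empty selection in the file */
        H5Sselect_none(memspace);  /* empty selection in memory   */
    }
    /* Every rank, including the empty ones, calls H5Dread collectively. */
    ret = H5Dread(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf);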
@@ -1676,218 +1645,221 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("readAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero row");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- if(data_origin1) free(data_origin1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ if (data_origin1)
+ free(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
- block[0] = 1;
- block[1] = (hsize_t)dim1;
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* Dataset5: point selection in memory - Hyperslab selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset5);
+ file_dataspace = H5Dget_space(dataset5);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset5);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset6 succeeded");
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset7);
+ file_dataspace = H5Dget_space(dataset7);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_all(file_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
num_points = (size_t)dim0 * (size_t)dim1;
- k=0;
- for (i=0 ; i<dim0; i++) {
- for (j=0 ; j<dim1; j++) {
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ for (j = 0; j < dim1; j++) {
coords[k++] = (hsize_t)i;
coords[k++] = (hsize_t)j;
}
}
- mem_dataspace = H5Dget_space (dataset7);
+ mem_dataspace = H5Dget_space(dataset7);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset7 succeeded");
- start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
start[1] = 0;
- ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
- if(ret) nerrors++;
+ ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+ data_origin1);
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
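Because dataset7's file selection is H5Sselect_all, every rank reads back the complete dim0 x dim1 array; the dataset_vrfy call above therefore offsets into the read buffer so each rank checks only the rows it originally wrote. Spelled out (a restatement of the pointer arithmetic above, not new logic):

    /* rows_per_rank full rows of dim1 values precede this rank's slice */
    int             rows_per_rank = dim0 / mpi_size;
    const DATATYPE *my_slice      = data_array1 + (size_t)rows_per_rank * (size_t)dim1 * (size_t)mpi_rank;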
@@ -1912,12 +1884,14 @@ dataset_readAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
-
/*
* Part 2--Independent read/write for extendible datasets.
*/
@@ -1933,45 +1907,44 @@ dataset_readAll(void)
void
extend_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- hsize_t max_dims[MAX_RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ hsize_t max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK]; /* for hyperslab setting */
- hsize_t stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1981,22 +1954,22 @@ extend_writeInd(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -2006,14 +1979,13 @@ extend_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
@@ -2022,7 +1994,7 @@ extend_writeInd(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (MAX_RANK, dims, max_dims);
+ sid = H5Screate_simple(MAX_RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
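The dataset created here starts with zero rows and columns and H5S_UNLIMITED maximum dimensions, which is only legal with chunked layout, hence the H5Pset_chunk call on the creation property list above. Pulled out of the test, the full creation sequence looks roughly like this (create_extendible_2d is an illustrative name; error checks trimmed for brevity):

    #include "hdf5.h"

    /* Create a 2-D extendible (chunked) integer dataset that starts empty. */
    static hid_t
    create_extendible_2d(hid_t fid, const char *name, hsize_t chunk0, hsize_t chunk1)
    {
        hsize_t cur[2]   = {0, 0};                         /* no rows/columns yet          */
        hsize_t maxd[2]  = {H5S_UNLIMITED, H5S_UNLIMITED}; /* grow later via H5Dset_extent */
        hsize_t chunk[2] = {chunk0, chunk1};
        hid_t   dcpl, sid, dset;

        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk); /* unlimited dimensions require chunking */

        sid  = H5Screate_simple(2, cur, maxd);
        dset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Sclose(sid);
        H5Pclose(dcpl);
        return dset; /* negative on failure */
    }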
@@ -2037,8 +2009,6 @@ extend_writeInd(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -2048,37 +2018,35 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -2088,13 +2056,13 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
@@ -2103,14 +2071,13 @@ extend_writeInd(void)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -2120,18 +2087,17 @@ extend_writeInd(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
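dataset2 is deliberately written past its current (still zero-sized) extent to confirm the failure, with automatic error reporting switched off around the attempt so the expected error stack does not clutter the test output; only after H5Dset_extent does the identical write succeed. The save/disable/restore idiom, isolated from the test (try_expected_failure is an illustrative name):

    #include "hdf5.h"

    /* Run a write that is expected to fail, without printing the HDF5
     * error stack, then restore the previous error handler. */
    static herr_t
    try_expected_failure(hid_t dset, hid_t memspace, hid_t filespace, const int *buf)
    {
        H5E_auto2_t saved_func;
        void       *saved_data;
        herr_t      ret;

        H5Eget_auto2(H5E_DEFAULT, &saved_func, &saved_data); /* remember current handler */
        H5Eset_auto2(H5E_DEFAULT, NULL, NULL);               /* silence error printing   */

        ret = H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, buf);

        H5Eset_auto2(H5E_DEFAULT, saved_func, saved_data); /* put the handler back */
        return ret;                                        /* caller expects a negative value */
    }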
/* release resource */
@@ -2140,7 +2106,6 @@ extend_writeInd(void)
ret = H5Sclose(mem_dataspace);
VRFY((ret >= 0), "H5Sclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2151,7 +2116,8 @@ extend_writeInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/*
@@ -2164,30 +2130,30 @@ void
extend_writeInd2(void)
{
const char *filename;
- hid_t fid; /* HDF5 file ID */
- hid_t fapl_id; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size=10; /* Original dataset dim size */
- hsize_t new_size=20; /* Extended dataset dim size */
- hsize_t one=1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
- int written[10], /* Data to write */
- retrieved[10]; /* Data read in */
- int mpi_size, mpi_rank; /* MPI settings */
- int i; /* Local index variable */
- herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl_id; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size = 10; /* Original dataset dim size */
+ hsize_t new_size = 20; /* Extended dataset dim size */
+ hsize_t one = 1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test #2 on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* -------------------
* START AN HDF5 FILE
@@ -2204,7 +2170,6 @@ extend_writeInd2(void)
ret = H5Pclose(fapl_id);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
@@ -2216,7 +2181,7 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* setup dimensionality object */
- fs = H5Screate_simple (1, &orig_size, &max_size);
+ fs = H5Screate_simple(1, &orig_size, &max_size);
VRFY((fs >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2227,7 +2192,6 @@ extend_writeInd2(void)
ret = H5Pclose(dcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* -------------------------
* Test writing to dataset
* -------------------------*/
@@ -2236,13 +2200,13 @@ extend_writeInd2(void)
VRFY((ms >= 0), "H5Screate_simple succeeded");
/* put some trivial data in the data_array */
- for(i = 0; i < (int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
written[i] = i;
MESG("data array initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset zero: ");
- for(i = 0; i < (int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", written[i]);
+ if (VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
HDprintf("\n");
}
ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
@@ -2253,16 +2217,16 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset zero: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", retrieved[i]);
+ if (VERBOSE_MED) {
+ MESG("read at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
HDprintf("\n");
}
@@ -2279,13 +2243,13 @@ extend_writeInd2(void)
/* -------------------------
* Write to the second half of the dataset
* -------------------------*/
- for (i=0; i<(int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
H5_CHECKED_ASSIGN(written[i], int, orig_size + (hsize_t)i, hsize_t);
MESG("data array re-initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", written[i]);
+ if (VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
HDprintf("\n");
}
ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
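Appending to a 1-D extendible dataset follows the same few steps wherever it appears in this test: grow the extent, re-fetch the file dataspace (the old handle still describes the smaller extent), select the new tail, and write. A compact sketch under those assumptions (append_ints is an illustrative name, not test code):

    #include "hdf5.h"

    /* Append `count` ints at the current end of a 1-D extendible dataset. */
    static herr_t
    append_ints(hid_t dset, hsize_t old_size, hsize_t count, const int *buf)
    {
        hsize_t new_size = old_size + count;
        hsize_t mdim     = count;
        hid_t   fs, ms;
        herr_t  ret;

        if (H5Dset_extent(dset, &new_size) < 0) /* grow the dataset first */
            return -1;

        fs = H5Dget_space(dset); /* re-fetch: reflects the new extent */
        H5Sselect_hyperslab(fs, H5S_SELECT_SET, &old_size, NULL, &count, NULL);

        ms  = H5Screate_simple(1, &mdim, NULL); /* memory side: just `count` ints */
        ret = H5Dwrite(dset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, buf);

        H5Sclose(ms);
        H5Sclose(fs);
        return ret;
    }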
@@ -2298,20 +2262,19 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", retrieved[i]);
+ if (VERBOSE_MED) {
+ MESG("read at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
HDprintf("\n");
}
-
/* Close dataset collectively */
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose succeeded");
@@ -2325,41 +2288,41 @@ extend_writeInd2(void)
void
extend_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2370,7 +2333,7 @@ extend_readInd(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -2390,7 +2353,7 @@ extend_readInd(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
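This hunk re-fetches dataset1's dataspace and queries its current dimensions while automatic error reporting is temporarily switched off; the surrounding checks sit in context not shown here. The query call itself, in isolation (`dset` is an illustrative handle name):

    hsize_t dims[2], maxdims[2];
    hid_t   sp   = H5Dget_space(dset); /* a fresh dataspace reflects the current extent */
    int     rank = H5Sget_simple_extent_dims(sp, dims, maxdims);

    /* rank is the number of dimensions; dims[] holds the current sizes,
     * maxdims[] the maximums (H5S_UNLIMITED for extendible dimensions). */
    H5Sclose(sp);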
@@ -2402,72 +2365,70 @@ extend_readInd(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2478,14 +2439,16 @@ extend_readInd(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
/*
@@ -2503,46 +2466,45 @@ extend_readInd(void)
void
extend_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- hsize_t max_dims[MAX_RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ hsize_t max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK]; /* for hyperslab setting */
- hsize_t stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -2552,22 +2514,22 @@ extend_writeAll(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -2577,14 +2539,13 @@ extend_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
@@ -2593,7 +2554,7 @@ extend_writeAll(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (MAX_RANK, dims, max_dims);
+ sid = H5Screate_simple(MAX_RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2608,8 +2569,6 @@ extend_writeAll(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -2619,41 +2578,39 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2661,7 +2618,6 @@ extend_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -2671,40 +2627,38 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
/* Temporary turn off auto error reporting */
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -2714,18 +2668,17 @@ extend_writeAll(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2736,7 +2689,6 @@ extend_writeAll(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2747,49 +2699,50 @@ extend_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/* Example of using the parallel HDF5 library to read an extendible dataset */
void
extend_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2800,7 +2753,7 @@ extend_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -2820,7 +2773,7 @@ extend_readAll(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
@@ -2832,95 +2785,91 @@ extend_readAll(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
H5Pclose(xfer_plist);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2932,14 +2881,16 @@ extend_readAll(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
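
Both phases of the read test build the same kind of transfer property list: collective MPI-IO, optionally downgraded so each rank performs its low-level transfers independently while still making the collective call. A small helper capturing that setup might look like the sketch below; the use_individual flag is a stand-in for the test's dxfer_coll_type switch and is not part of any HDF5 API.

#include "hdf5.h"

/* Build a dataset transfer property list for parallel I/O.  When
 * use_individual is nonzero, keep the collective HDF5 interface but let the
 * MPI-IO layer carry out the transfers independently on each rank. */
static hid_t
make_parallel_dxpl(int use_individual)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    if (dxpl < 0)
        return -1;
    if (H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE) < 0)
        goto error;
    if (use_individual && H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO) < 0)
        goto error;
    return dxpl;

error:
    H5Pclose(dxpl);
    return -1;
}

Every rank must still pass this property list to H5Dread()/H5Dwrite(); H5FD_MPIO_INDIVIDUAL_IO changes only how the MPI-IO layer performs the transfer, not the collectiveness of the HDF5 call.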
/*
@@ -2950,49 +2901,49 @@ extend_readAll(void)
void
compress_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
- int rank=1; /* Dataspace rank */
- hsize_t dim=(hsize_t)dim0; /* Dataspace dimensions */
- unsigned u; /* Local index variable */
- unsigned chunk_opts; /* Chunk options */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank = 1; /* Dataspace rank */
+ hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- DATATYPE *data_read = NULL; /* data buffer */
- DATATYPE *data_orig = NULL; /* expected data buffer */
+ DATATYPE * data_read = NULL; /* data buffer */
+ DATATYPE * data_orig = NULL; /* expected data buffer */
const char *filename;
- MPI_Comm comm = test_comm;
- MPI_Info info = MPI_INFO_NULL;
- int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective chunked dataset read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
/* Retrieve MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
/* Allocate data buffer */
- data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded");
- data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_read != NULL), "data_array1 HDmalloc succeeded");
/* Initialize data buffers */
- for(u=0; u<dim;u++)
- data_orig[u]=(DATATYPE)u;
+ for (u = 0; u < dim; u++)
+ data_orig[u] = (DATATYPE)u;
/* Run test both with and without filters disabled on partial chunks */
- for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
- disable_partial_chunk_filters++) {
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
/* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
+ if (mpi_rank == 0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
/* Create the file */
fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -3007,18 +2958,18 @@ compress_readAll(void)
/* Use eight chunks */
chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* Set chunk options appropriately */
- if(disable_partial_chunk_filters) {
+ if (disable_partial_chunk_filters) {
ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
- VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+ VRFY((ret >= 0), "H5Pget_chunk_opts succeeded");
chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
ret = H5Pset_chunk_opts(dcpl, chunk_opts);
- VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ VRFY((ret >= 0), "H5Pset_chunk_opts succeeded");
} /* end if */
ret = H5Pset_deflate(dcpl, 9);
@@ -3029,7 +2980,8 @@ compress_readAll(void)
VRFY((dataspace > 0), "H5Screate_simple succeeded");
/* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ dataset =
+ H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), "H5Dcreate2 succeeded");
/* Write compressed data */
@@ -3051,49 +3003,47 @@ compress_readAll(void)
MPI_Barrier(comm);
/* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
+ * OPEN AN HDF5 FILE
+ * -------------------*/
/* setup file access template */
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid > 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* Open dataset with compressed chunks */
dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
VRFY((dataset > 0), "H5Dopen2 succeeded");
/* Try reading & writing data */
- if(dataset>0) {
+ if (dataset > 0) {
/* Create dataset transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist > 0), "H5Pcreate succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try reading the data */
ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dread succeeded");
/* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ for (u = 0; u < dim; u++)
+ if (data_orig[u] != data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+ (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
nerrors++;
}
@@ -3114,8 +3064,10 @@ compress_readAll(void)
} /* end for */
/* release data buffers */
- if(data_read) HDfree(data_read);
- if(data_orig) HDfree(data_orig);
+ if (data_read)
+ HDfree(data_read);
+ if (data_orig)
+ HDfree(data_orig);
}
#endif /* H5_HAVE_FILTER_DEFLATE */
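
The serial setup that rank 0 performs for this test reduces to a dataset creation property list with chunking and deflate enabled, plus the optional flag that leaves partial edge chunks unfiltered. A sketch of just that step, assuming H5_HAVE_FILTER_DEFLATE and an already-open serial file ID (the eight-chunk split mirrors the test; names and sizes are the caller's choice):

#include "hdf5.h"

/* Serially create a 1-D chunked dataset compressed with deflate level 9.
 * When skip_partial is nonzero, partial edge chunks are left unfiltered. */
static hid_t
create_compressed_dataset(hid_t fid, const char *name, hsize_t dim, int skip_partial)
{
    hsize_t  chunk_dim = dim / 8; /* eight chunks across the dataset */
    unsigned opts      = 0;
    hid_t    dcpl      = H5Pcreate(H5P_DATASET_CREATE);
    hid_t    space     = H5Screate_simple(1, &dim, NULL);
    hid_t    dset;

    H5Pset_chunk(dcpl, 1, &chunk_dim);
    if (skip_partial) {
        H5Pget_chunk_opts(dcpl, &opts);
        opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
        H5Pset_chunk_opts(dcpl, opts);
    }
    H5Pset_deflate(dcpl, 9);

    dset = H5Dcreate2(fid, name, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Sclose(space);
    H5Pclose(dcpl);
    return dset; /* negative if creation failed */
}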
@@ -3134,39 +3086,39 @@ compress_readAll(void)
void
none_selection_chunk(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE * data_origin = NULL; /* data buffer */
+ DATATYPE * data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[MAX_RANK]; /* for hyperslab setting */
- hsize_t count[MAX_RANK]; /* for hyperslab setting */
- hsize_t stride[MAX_RANK]; /* for hyperslab setting */
- hsize_t block[MAX_RANK]; /* for hyperslab setting */
- hsize_t mstart[MAX_RANK]; /* for data buffer in memory */
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t mstart[MAX_RANK]; /* for data buffer in memory */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = test_comm;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
@@ -3192,8 +3144,8 @@ none_selection_chunk(void)
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
@@ -3202,7 +3154,7 @@ none_selection_chunk(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -3225,65 +3177,64 @@ none_selection_chunk(void)
/* allocate memory for data buffer. Only allocate enough buffer for
* each processor's data. */
- if(mpi_rank) {
- data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ if (mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
- data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* put some trivial data in the data_array */
mstart[0] = mstart[1] = 0;
dataset_fill(mstart, block, data_origin);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
}
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(file_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* -------------------------
@@ -3293,19 +3244,18 @@ none_selection_chunk(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* release resource */
@@ -3316,7 +3266,6 @@ none_selection_chunk(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -3327,11 +3276,12 @@ none_selection_chunk(void)
H5Fclose(fid);
/* release data buffers */
- if(data_origin) HDfree(data_origin);
- if(data_array) HDfree(data_array);
+ if (data_origin)
+ HDfree(data_origin);
+ if (data_array)
+ HDfree(data_array);
}
-
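
The point of none_selection_chunk() is that a rank with nothing to transfer still has to take part in the collective H5Dwrite()/H5Dread() calls; it simply passes empty selections on both the memory and file dataspaces. Reduced to a sketch (the 2-D block layout and the rank-0-is-empty choice mirror the test; the helper itself is illustrative):

#include "hdf5.h"

/* Collective write in which rank 0 contributes no data.  'start' and 'block'
 * describe this rank's 2-D hyperslab; rank 0's selections are emptied. */
static herr_t
write_with_possible_empty_rank(hid_t dset, int mpi_rank, const hsize_t start[2],
                               const hsize_t block[2], const int *buf)
{
    hsize_t count[2] = {1, 1};
    hid_t   fspace   = H5Dget_space(dset);
    hid_t   mspace   = H5Screate_simple(2, block, NULL);
    hid_t   dxpl     = H5Pcreate(H5P_DATASET_XFER);
    herr_t  ret;

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, block);

    if (mpi_rank == 0) {
        /* Nothing from this rank: empty both selections, but still call H5Dwrite */
        H5Sselect_none(mspace);
        H5Sselect_none(fspace);
    }

    ret = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    H5Pclose(dxpl);
    H5Sclose(mspace);
    H5Sclose(fspace);
    return ret;
}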
/* Function: test_actual_io_mode
*
* Purpose: tests one specific case of collective I/O and checks that the
@@ -3395,63 +3345,61 @@ none_selection_chunk(void)
* Date: 2011-04-06
*/
static void
-test_actual_io_mode(int selection_mode) {
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
- const char * filename;
- const char * test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_id = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
- hsize_t dims[MAX_RANK];
- hsize_t chunk_dims[MAX_RANK];
- hsize_t start[MAX_RANK];
- hsize_t stride[MAX_RANK];
- hsize_t count[MAX_RANK];
- hsize_t block[MAX_RANK];
- char message[256];
- herr_t ret;
+test_actual_io_mode(int selection_mode)
+{
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ const char * filename;
+ const char * test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl_id = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[MAX_RANK];
+ hsize_t chunk_dims[MAX_RANK];
+ hsize_t start[MAX_RANK];
+ hsize_t stride[MAX_RANK];
+ hsize_t count[MAX_RANK];
+ hsize_t block[MAX_RANK];
+ char message[256];
+ herr_t ret;
/* Set up some flags to make some future if statements slightly more readable */
- direct_multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
+ direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
- multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
- selection_mode == TEST_ACTUAL_IO_RESET );
+ multi_chunk_io =
+ (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
- is_chunked = (
- selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
- selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+ is_chunked =
+ (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
@@ -3480,7 +3428,7 @@ test_actual_io_mode(int selection_mode) {
/* Create the basic Space */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* Create the dataset creation plist */
@@ -3488,17 +3436,16 @@ test_actual_io_mode(int selection_mode) {
VRFY((dcpl >= 0), "dataset creation plist created successfully");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
/* Create the dataset */
- dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
/* Create the file dataspace */
@@ -3507,7 +3454,7 @@ test_actual_io_mode(int selection_mode) {
/* Choose a selection method based on the type of I/O we want to occur,
     * and also set up some selection-dependent test info. */
- switch(selection_mode) {
+ switch (selection_mode) {
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
@@ -3518,9 +3465,9 @@ test_actual_io_mode(int selection_mode) {
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Multi Chunk - Independent";
+ test_name = "Multi Chunk - Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
/* Collective I/O with optimization */
@@ -3532,9 +3479,9 @@ test_actual_io_mode(int selection_mode) {
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- test_name = "Multi Chunk - Collective";
+ test_name = "Multi Chunk - Collective";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1)
+ if (mpi_size > 1)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
@@ -3551,24 +3498,25 @@ test_actual_io_mode(int selection_mode) {
* and at least one chunk independently, reporting mixed I/O.
*/
- if(mpi_rank == 0) {
- /* Select the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- } else {
+ if (mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
}
- test_name = "Multi Chunk - Mixed";
+ test_name = "Multi Chunk - Mixed";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
break;
/* RESET tests that the properties are properly reset to defaults each time I/O is
@@ -3591,20 +3539,21 @@ test_actual_io_mode(int selection_mode) {
     * collectively, and their other chunk independently, reporting mixed I/O.
*/
- if(mpi_rank == 0) {
- /* Select the first chunk in the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / (hsize_t)mpi_size;
- } else {
+ if (mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / (hsize_t)mpi_size;
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
}
/* If the testname was not already set by the RESET case */
@@ -3614,8 +3563,8 @@ test_actual_io_mode(int selection_mode) {
test_name = "Multi Chunk - Mixed (Disagreement)";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1) {
- if(mpi_rank == 0)
+ if (mpi_size > 1) {
+ if (mpi_rank == 0)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
@@ -3630,9 +3579,9 @@ test_actual_io_mode(int selection_mode) {
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Link Chunk";
+ test_name = "Link Chunk";
actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
break;
/* Contiguous Dataset */
@@ -3641,23 +3590,23 @@ test_actual_io_mode(int selection_mode) {
* collective I/O */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Contiguous";
+ test_name = "Contiguous";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
break;
case TEST_ACTUAL_IO_NO_COLLECTIVE:
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Independent";
+ test_name = "Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
break;
default:
- test_name = "Undefined Selection Mode";
+ test_name = "Undefined Selection Mode";
actual_chunk_opt_mode_expected = -1;
- actual_io_mode_expected = -1;
+ actual_io_mode_expected = -1;
break;
}
@@ -3667,7 +3616,7 @@ test_actual_io_mode(int selection_mode) {
/* Create a memory dataspace mirroring the dataset and select the same hyperslab
* as in the file space.
*/
- mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
+ mem_space = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
@@ -3679,7 +3628,7 @@ test_actual_io_mode(int selection_mode) {
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
@@ -3687,7 +3636,7 @@ test_actual_io_mode(int selection_mode) {
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
/* Set collective I/O properties in the dxpl. */
- if(is_collective) {
+ if (is_collective) {
/* Request collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3697,19 +3646,19 @@ test_actual_io_mode(int selection_mode) {
* multi chunk io instead of link chunk io.
             * This goes through the default threshold-based decision.
*/
- if(multi_chunk_io) {
+ if (multi_chunk_io) {
/* force multi-chunk-io by threshold */
- ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2);
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
            /* set this to manipulate the testing scenario for allocating processes
* to chunks */
- ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99);
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
}
/* Set directly go to multi-chunk-io without threshold calc. */
- if(direct_multi_chunk_io) {
+ if (direct_multi_chunk_io) {
/* set for multi chunk io by property*/
ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3722,43 +3671,47 @@ test_actual_io_mode(int selection_mode) {
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
    /* Retrieve the actual I/O values */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode suceeded" );
+ VRFY((ret >= 0), "retriving actual io mode suceeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
    /* Retrieve the actual I/O values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
/* Check write vs read */
VRFY((actual_io_mode_read == actual_io_mode_write),
- "reading and writing are the same for actual_io_mode");
+ "reading and writing are the same for actual_io_mode");
VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
- "reading and writing are the same for actual_chunk_opt_mode");
+ "reading and writing are the same for actual_chunk_opt_mode");
/* Test values */
- if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) {
- HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
+ actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n", test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
- } else {
- HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
- actual_chunk_opt_mode_write, actual_io_mode_write);
+ }
+ else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
+ actual_io_mode_write);
}
    /* To test that the property is successfully reset to the default, we perform some
@@ -3778,14 +3731,14 @@ test_actual_io_mode(int selection_mode) {
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset write (independent)");
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset write (independent)");
+ "actual_io_mode has correct value for reset write (independent)");
/* Read */
ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
@@ -3793,15 +3746,15 @@ test_actual_io_mode(int selection_mode) {
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset read (independent)");
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset read (independent)");
- }
+ "actual_io_mode has correct value for reset read (independent)");
+ }
}
/* Release some resources */
@@ -3818,7 +3771,6 @@ test_actual_io_mode(int selection_mode) {
return;
}
-
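
Outside the harness, the same two queries are how an application confirms after the fact which collective path HDF5 actually took for a transfer. A minimal sketch of the query sequence, assuming the H5Dwrite()/H5Dread() call has already been made with the same property list:

#include <stdio.h>
#include "hdf5.h"

/* Report what HDF5 actually did for the last I/O call that used 'dxpl'.
 * The properties are refreshed on every I/O call, so query before reusing
 * the list for a transfer with different characteristics. */
static void
report_actual_io(hid_t dxpl, int mpi_rank)
{
    H5D_mpio_actual_io_mode_t        io_mode;
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;

    if (H5Pget_mpio_actual_io_mode(dxpl, &io_mode) < 0 ||
        H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode) < 0) {
        fprintf(stderr, "rank %d: query of actual I/O properties failed\n", mpi_rank);
        return;
    }

    /* e.g. H5D_MPIO_CHUNK_COLLECTIVE with H5D_MPIO_LINK_CHUNK for link-chunk I/O,
     * or H5D_MPIO_NO_COLLECTIVE with H5D_MPIO_NO_CHUNK_OPTIMIZATION for independent I/O */
    printf("rank %d: actual_io_mode=%d, actual_chunk_opt_mode=%d\n", mpi_rank, (int)io_mode, (int)opt_mode);
}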
/* Function: actual_io_mode_tests
*
* Purpose: Tests all possible cases of the actual_io_mode property.
@@ -3827,7 +3779,8 @@ test_actual_io_mode(int selection_mode) {
* Date: 2011-04-06
*/
void
-actual_io_mode_tests(void) {
+actual_io_mode_tests(void)
+{
int mpi_size = -1;
int mpi_rank = -1;
MPI_Comm_size(test_comm, &mpi_size);
@@ -3902,7 +3855,8 @@ actual_io_mode_tests(void) {
*
* TEST_FILTERS:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead.
+ * Note: TEST_FILTERS mode will not work until H5Dcreate and H5Dwrite are supported for mpio and the
+ * filter feature. Use the test_no_collective_cause_mode_filter() function instead.
*
*
* Programmer: Jonathan Kim
@@ -3912,35 +3866,35 @@ actual_io_mode_tests(void) {
static void
test_no_collective_cause_mode(int selection_mode)
{
- uint32_t no_collective_cause_local_write = 0;
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_write = 0;
- uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
// hsize_t coord[NELM][MAX_RANK];
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- hbool_t is_independent=0;
- int mpi_size = -1;
- int mpi_rank = -1;
+ const char *filename;
+ const char *test_name;
+ hbool_t is_chunked = 1;
+ hbool_t is_independent = 0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
int length;
- int * buffer;
+ int * buffer;
int i;
MPI_Comm mpi_comm;
MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_id = -1;
- hid_t dcpl = -1;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl_id = -1;
+ hid_t dcpl = -1;
hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t dxpl_read = -1;
hsize_t dims[MAX_RANK];
- hid_t mem_space = -1;
+ hid_t mem_space = -1;
hid_t file_space = -1;
hsize_t chunk_dims[MAX_RANK];
herr_t ret;
@@ -3948,7 +3902,7 @@ test_no_collective_cause_mode(int selection_mode)
H5Z_filter_t filter_info;
#endif /* LATER */
/* set to global value as default */
- int l_facc_type = facc_type;
+ int l_facc_type = facc_type;
char message[256];
/* Set up MPI parameters */
@@ -3967,27 +3921,29 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((dcpl >= 0), "dataset creation plist created successfully");
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
- ret = H5Pset_layout (dcpl, H5D_COMPACT);
- VRFY((ret >= 0),"set COMPACT layout succeeded");
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((ret >= 0), "set COMPACT layout succeeded");
is_chunked = 0;
}
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED);
- VRFY((ret >= 0),"set EXTERNAL file layout succeeded");
+ ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
+ VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
is_chunked = 0;
}
#ifdef LATER /* fletcher32 */
if (selection_mode & TEST_FILTERS) {
ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+ VRFY((ret >= 0), "Fletcher32 filter is available.\n");
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+ ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
+ VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
+ (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
+ "Fletcher32 filter encoding and decoding available.\n");
ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
+ VRFY((ret >= 0), "set filter (flecher32) succeeded");
}
#endif /* LATER */
@@ -4007,11 +3963,10 @@ test_no_collective_cause_mode(int selection_mode)
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
}
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
}
-
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -4025,20 +3980,18 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
-
/* Create the dataset */
dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
/*
* Set expected causes and some tweaks based on the type of test
*/
@@ -4078,14 +4031,14 @@ test_no_collective_cause_mode(int selection_mode)
#endif /* LATER */
if (selection_mode & TEST_COLLECTIVE) {
- test_name = "Broken Collective I/O - Not Broken";
- no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
}
if (selection_mode & TEST_SET_INDEPENDENT) {
- test_name = "Broken Collective I/O - Independent";
- no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
/* switch to independent io */
is_independent = 1;
@@ -4095,7 +4048,7 @@ test_no_collective_cause_mode(int selection_mode)
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
file_space = H5S_ALL;
- mem_space = H5S_ALL;
+ mem_space = H5S_ALL;
}
else {
/* Get the file dataspace */
@@ -4103,7 +4056,7 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((file_space >= 0), "H5Dget_space succeeded");
/* Create the memory dataspace */
- mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
+ mem_space = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
}
@@ -4113,14 +4066,14 @@ test_no_collective_cause_mode(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
- if(is_independent) {
+ if (is_independent) {
/* Set Independent I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -4129,11 +4082,10 @@ test_no_collective_cause_mode(int selection_mode)
/* Set Collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform (dxpl_write, "x+1");
+ ret = H5Pset_data_transform(dxpl_write, "x+1");
VRFY((ret >= 0), "H5Pset_data_transform succeeded");
}
@@ -4143,14 +4095,14 @@ test_no_collective_cause_mode(int selection_mode)
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
-
+ ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
+ &no_collective_cause_global_write);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/*---------------------
* Test Read access
@@ -4163,25 +4115,27 @@ test_no_collective_cause_mode(int selection_mode)
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
+ ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/* Check write vs read */
VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
- "reading and writing are the same for local cause of Broken Collective I/O");
+ "reading and writing are the same for local cause of Broken Collective I/O");
VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
- "reading and writing are the same for global cause of Broken Collective I/O");
+ "reading and writing are the same for global cause of Broken Collective I/O");
/* Test values */
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -4212,7 +4166,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-
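
The local and global cause values checked in this test are uint32_t bit fields, so an application can compare them against H5D_MPIO_COLLECTIVE or mask out individual causes exactly as the harness does. A brief sketch, restricted to cause values that appear in this test:

#include <stdint.h>
#include <stdio.h>
#include "hdf5.h"

/* After an H5Dwrite()/H5Dread() made with 'dxpl' set for collective I/O,
 * explain whether collective I/O happened and, if not, show the cause bits. */
static void
report_broken_collective(hid_t dxpl, int mpi_rank)
{
    uint32_t local_cause  = 0;
    uint32_t global_cause = 0;

    if (H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause) < 0)
        return;

    if (global_cause == H5D_MPIO_COLLECTIVE)
        printf("rank %d: collective I/O was performed\n", mpi_rank);
    else if (global_cause & H5D_MPIO_SET_INDEPENDENT)
        printf("rank %d: collective I/O skipped because independent transfer was requested\n", mpi_rank);
    else
        printf("rank %d: collective I/O broken, cause bits local=0x%x global=0x%x\n", mpi_rank,
               (unsigned)local_cause, (unsigned)global_cause);
}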
#if 0
/*
* Function: test_no_collective_cause_mode_filter
@@ -4267,7 +4220,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
herr_t ret;
#ifdef LATER /* fletcher32 */
H5Z_filter_t filter_info;
-#endif /* LATER */
+#endif /* LATER */
char message[256];
/* Set up MPI parameters */
@@ -4295,7 +4248,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
ret = H5Pset_fletcher32(dcpl);
VRFY((ret >= 0),"set filter (flecher32) succeeded");
-#endif /* LATER */
+#endif /* LATER */
}
else {
VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
@@ -4337,7 +4290,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
test_name = "Broken Collective I/O - Filter is required";
no_collective_cause_local_expected = H5D_MPIO_FILTERS;
no_collective_cause_global_expected = H5D_MPIO_FILTERS;
-#endif /* LATER */
+#endif /* LATER */
/* Get the file dataspace */
file_space = H5Dget_space(dataset);
@@ -4460,27 +4413,28 @@ no_collective_cause_tests(void)
/*
* Test individual cause
*/
- test_no_collective_cause_mode (TEST_COLLECTIVE);
- test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+ test_no_collective_cause_mode(TEST_COLLECTIVE);
+ test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
/* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
+ test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
#endif /* LATER */
/*
* Test combined causes
*/
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
+ TEST_DATA_TRANSFORMS);
return;
}
@@ -4499,41 +4453,42 @@ no_collective_cause_tests(void)
void
dataset_atomicity(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[MAX_RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
- int buf_size;
- hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
- hsize_t start[MAX_RANK];
- hsize_t stride[MAX_RANK];
- hsize_t count[MAX_RANK];
- hsize_t block[MAX_RANK];
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ int * write_buf = NULL; /* data buffer */
+ int * read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[MAX_RANK];
+ hsize_t stride[MAX_RANK];
+ hsize_t count[MAX_RANK];
+ hsize_t block[MAX_RANK];
const char *filename;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
- int i, j, k;
- hbool_t atomicity = FALSE;
- MPI_Comm comm = test_comm;
- MPI_Info info = MPI_INFO_NULL;
-
- dim0 = 64; dim1 = 32;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64;
+ dim1 = 32;
filename = GetTestParameters();
if (facc_type != FACC_MPIO) {
HDprintf("Atomicity tests will not work without the MPIO VFD\n");
return;
}
- if(VERBOSE_MED)
+ if (VERBOSE_MED)
HDprintf("atomic writes to file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
@@ -4558,26 +4513,22 @@ dataset_atomicity(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create datasets */
- dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
- dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
/* initialize datasets to 0s */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
@@ -4590,39 +4541,39 @@ dataset_atomicity(void)
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* make sure setting atomicity fails on a serial file ID */
/* file locking allows only one file open (serial) for writing */
- if(MAINPROCESS){
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+ if (MAINPROCESS) {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
VRFY((fid >= 0), "H5Fopen succeeed");
}
/* should fail */
- ret = H5Fset_mpi_atomicity(fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- if(MAINPROCESS){
+ if (MAINPROCESS) {
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
}
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* setup file access template */
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity(fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
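The hunks above set up the core of the atomicity test: a file opened serially must reject H5Fset_mpi_atomicity, while a file opened through the MPI-IO driver accepts it. A minimal sketch of the MPI-IO half of that pattern, outside the test harness (the helper name and the missing error checks are illustrative, not taken from the test):

    #include "hdf5.h"
    #include <mpi.h>

    /* Sketch: open a file collectively with the MPI-IO VFD and turn on atomicity. */
    static hid_t
    open_with_atomicity(const char *name, MPI_Comm comm)
    {
        hid_t   fapl, fid;
        hbool_t flag = 0;

        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL); /* parallel access, so atomicity is allowed */

        fid = H5Fopen(name, H5F_ACC_RDWR, fapl);     /* collective open */
        H5Pclose(fapl);

        H5Fset_mpi_atomicity(fid, 1);                /* would fail on a serially opened file */
        H5Fget_mpi_atomicity(fid, &flag);            /* flag is now TRUE */
        return fid;
    }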
@@ -4630,22 +4581,22 @@ dataset_atomicity(void)
VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
if (0 == mpi_rank) {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
}
else {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
}
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes contiguously to the entire dataset */
if (0 == mpi_rank) {
@@ -4658,12 +4609,14 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
}
- if(VERBOSE_MED) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- HDprintf ("\n");
- for (j=0 ; j<dim1 ; j++)
- HDprintf ("%d ", read_buf[k++]);
+ if (VERBOSE_MED) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
}
@@ -4675,10 +4628,11 @@ dataset_atomicity(void)
VRFY((compare == 0 || compare == 5),
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
- for (i=1; i<buf_size; i++) {
+ for (i = 1; i < buf_size; i++) {
if (read_buf[i] != compare) {
- HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
- nerrors ++;
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+ read_buf[i], compare);
+ nerrors++;
}
}
}
@@ -4687,8 +4641,10 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5D close succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
/* open dataset2 (non-contiguous case) */
dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
@@ -4701,69 +4657,68 @@ dataset_atomicity(void)
read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
atomicity = FALSE;
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
-
- block[0] = (hsize_t)(dim0/mpi_size) - 1;
- block[1] = (hsize_t)(dim1/mpi_size) - 1;
+ block[0] = (hsize_t)(dim0 / mpi_size) - 1;
+ block[1] = (hsize_t)(dim1 / mpi_size) - 1;
stride[0] = block[0] + 1;
stride[1] = block[1] + 1;
- count[0] = (hsize_t)mpi_size;
- count[1] = (hsize_t)mpi_size;
- start[0] = 0;
- start[1] = 0;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
+ start[0] = 0;
+ start[1] = 0;
/* create a file dataspace */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace */
- mem_dataspace = H5Screate_simple (MAX_RANK, dims, NULL);
+ mem_dataspace = H5Screate_simple(MAX_RANK, dims, NULL);
VRFY((mem_dataspace >= 0), "");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes to the dataset */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
/* All processes wait for the write to finish. This works because
atomicity is set to true */
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* The other processes read the entire dataset */
if (0 != mpi_rank) {
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, read_buf);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
}
- if(VERBOSE_MED) {
+ if (VERBOSE_MED) {
if (mpi_rank == 1) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- HDprintf ("\n");
- for (j=0 ; j<dim1 ; j++)
- HDprintf ("%d ", read_buf[k++]);
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
- HDprintf ("\n");
+ HDprintf("\n");
}
}
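The two hunks above are the heart of the contiguous case: rank 0 writes the whole dataset, every rank synchronizes on a barrier, and the remaining ranks read it back; with atomicity enabled the readers must see either all old values or all new ones, which is exactly what the verification loop in the next hunk asserts. A condensed sketch of that ordering, assuming a file and dataset opened as in the hunks above:

    #include "hdf5.h"
    #include <mpi.h>

    /* Sketch: the ordering the test relies on. `dset` is assumed to live in a
     * file that already has MPI atomicity enabled. */
    static void
    atomic_write_then_read(hid_t dset, MPI_Comm comm, int rank, int *buf)
    {
        if (rank == 0)
            H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

        MPI_Barrier(comm); /* readers wait until rank 0's write has completed */

        if (rank != 0)
            H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
        /* The verification in the next hunk requires every element read to
         * equal the first one: all old values (0) or all new values (5). */
    }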
@@ -4771,32 +4726,36 @@ dataset_atomicity(void)
as 5 (read happened after process 0 wrote to dataset 1) */
if (0 != mpi_rank) {
int compare;
- i=0;j=0;k=0;
+ i = 0;
+ j = 0;
+ k = 0;
compare = 5;
- for (i=0 ; i<dim0 ; i++) {
- if ((hsize_t)i >= (hsize_t)mpi_rank*(block[0]+1)) {
+ for (i = 0; i < dim0; i++) {
+ if ((hsize_t)i >= (hsize_t)mpi_rank * (block[0] + 1)) {
break;
}
- if (((hsize_t)i+1)%(block[0]+1)==0) {
+ if (((hsize_t)i + 1) % (block[0] + 1) == 0) {
k += dim1;
continue;
}
- for (j=0 ; j<dim1 ; j++) {
- if ((hsize_t)j >= (hsize_t)mpi_rank*(block[1]+1)) {
- H5_CHECKED_ASSIGN(k, int, (hsize_t)dim1 - (hsize_t)mpi_rank*(block[1]+1) + (hsize_t)k, hsize_t);
+ for (j = 0; j < dim1; j++) {
+ if ((hsize_t)j >= (hsize_t)mpi_rank * (block[1] + 1)) {
+ H5_CHECKED_ASSIGN(k, int, (hsize_t)dim1 - (hsize_t)mpi_rank * (block[1] + 1) + (hsize_t)k,
+ hsize_t);
break;
}
- if (((hsize_t)j+1)%(block[1]+1)==0) {
+ if (((hsize_t)j + 1) % (block[1] + 1) == 0) {
k++;
continue;
}
else if (compare != read_buf[k]) {
- HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
+ k, read_buf[k], compare);
nerrors++;
}
- k ++;
+ k++;
}
}
}
@@ -4809,12 +4768,13 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Sclose succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
-
}
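For the non-contiguous (dataset2) case above, every rank builds the same regular hyperslab: blocks of (dim/mpi_size - 1) rows and columns separated by a one-element gap (stride = block + 1), repeated mpi_size times in each dimension, starting at the origin. A standalone sketch of that selection, with the dimensions passed in as illustrative parameters:

    #include "hdf5.h"

    /* Sketch: regular 2-D hyperslab with a one-element gap between blocks,
     * mirroring the block/stride/count/start arithmetic used above. */
    static herr_t
    select_gapped_blocks(hid_t fspace, hsize_t dim0, hsize_t dim1, int mpi_size)
    {
        hsize_t start[2]  = {0, 0};
        hsize_t block[2]  = {dim0 / (hsize_t)mpi_size - 1, dim1 / (hsize_t)mpi_size - 1};
        hsize_t stride[2] = {block[0] + 1, block[1] + 1};
        hsize_t count[2]  = {(hsize_t)mpi_size, (hsize_t)mpi_size};

        /* selects count[0] x count[1] blocks, leaving one unselected row and
         * column between neighbouring blocks */
        return H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
    }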
/* Function: dense_attr_test
@@ -4827,21 +4787,21 @@ dataset_atomicity(void)
void
test_dense_attr(void)
{
- int mpi_size, mpi_rank;
- hid_t fpid, fid;
- hid_t gid, gpid;
- hid_t atFileSpace, atid;
- hsize_t atDims[1] = {10000};
- herr_t status;
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
const char *filename;
/* get filename */
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
+ HDassert(filename != NULL);
/* set up MPI parameters */
- MPI_Comm_size(test_comm,&mpi_size);
- MPI_Comm_rank(test_comm,&mpi_rank);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
fpid = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fpid > 0), "H5Pcreate succeeded");
@@ -4881,12 +4841,11 @@ test_dense_attr(void)
return;
}
-
int
main(int argc, char **argv)
{
- int express_test;
- int mpi_size, mpi_rank; /* mpi variables */
+ int express_test;
+ int mpi_size, mpi_rank; /* mpi variables */
hsize_t oldsize, newsize = 1048576;
#ifndef H5_HAVE_WIN32_API
@@ -4895,7 +4854,6 @@ main(int argc, char **argv)
HDsetbuf(stdout, NULL);
#endif
-
MPI_Init(&argc, &argv);
MPI_Comm_size(test_comm, &mpi_size);
MPI_Comm_rank(test_comm, &mpi_rank);
@@ -4904,7 +4862,7 @@ main(int argc, char **argv)
dim1 = BIG_Y_FACTOR;
dim2 = BIG_Z_FACTOR;
- if (MAINPROCESS){
+ if (MAINPROCESS) {
HDprintf("===================================\n");
HDprintf("2 GByte IO TESTS START\n");
HDprintf("2 MPI ranks will run the tests...\n");
@@ -4912,7 +4870,7 @@ main(int argc, char **argv)
h5_show_hostname();
}
- if (H5dont_atexit() < 0){
+ if (H5dont_atexit() < 0) {
HDprintf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
@@ -4923,52 +4881,46 @@ main(int argc, char **argv)
if (mpi_size > 2) {
int rank_color = 0;
- if (mpi_rank >= 2) rank_color = 1;
+ if (mpi_rank >= 2)
+ rank_color = 1;
if (MPI_Comm_split(test_comm, rank_color, mpi_rank, &test_comm) != MPI_SUCCESS) {
HDprintf("MPI returned an error. Exiting\n");
- }
+ }
}
/* Initialize testing framework */
if (mpi_rank < 2) {
- TestInit(argv[0], usage, parse_options);
+ TestInit(argv[0], usage, parse_options);
- /* Parse command line arguments */
- TestParseCmdLine(argc, argv);
+ /* Parse command line arguments */
+ TestParseCmdLine(argc, argv);
- AddTest("idsetw", dataset_writeInd, NULL,
- "dataset independent write", PARATESTFILE);
+ AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE);
- AddTest("idsetr", dataset_readInd, NULL,
- "dataset independent read", PARATESTFILE);
+ AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE);
- AddTest("cdsetw", dataset_writeAll, NULL,
- "dataset collective write", PARATESTFILE);
+ AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE);
- AddTest("cdsetr", dataset_readAll, NULL,
- "dataset collective read", PARATESTFILE);
+ AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE);
- AddTest("eidsetw2", extend_writeInd2, NULL,
- "extendible dataset independent write #2", PARATESTFILE);
+ AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE);
- AddTest("selnone", none_selection_chunk, NULL,
- "chunked dataset with none-selection", PARATESTFILE);
+ AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE);
#ifdef H5_HAVE_FILTER_DEFLATE
- AddTest("cmpdsetr", compress_readAll, NULL,
- "compressed dataset collective read", PARATESTFILE);
+ AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */
- /* Display testing information */
- if (MAINPROCESS)
- TestInfo(argv[0]);
+ /* Display testing information */
+ if (MAINPROCESS)
+ TestInfo(argv[0]);
- /* setup file access property list */
- fapl = H5Pcreate (H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL);
+ /* setup file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL);
- /* Perform requested testing */
- PerformTests();
+ /* Perform requested testing */
+ PerformTests();
}
MPI_Barrier(MPI_COMM_WORLD);
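The main() hunks above restrict the 2 GByte tests to two MPI ranks: when the job is larger, MPI_Comm_split gives the first two ranks their own communicator and only they register and run the tests. A minimal sketch of that split (the color values mirror the code above; the print statement is just illustrative):

    #include <mpi.h>
    #include <stdio.h>

    int
    main(int argc, char **argv)
    {
        MPI_Comm test_comm = MPI_COMM_WORLD;
        int      mpi_size, mpi_rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        if (mpi_size > 2) {
            int rank_color = (mpi_rank >= 2) ? 1 : 0; /* ranks 0-1 keep color 0 */
            MPI_Comm_split(MPI_COMM_WORLD, rank_color, mpi_rank, &test_comm);
        }

        if (mpi_rank < 2)
            printf("rank %d participates in the 2 GByte tests on test_comm\n", mpi_rank);
        /* ... the tests would then run using test_comm ... */

        MPI_Finalize();
        return 0;
    }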
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 22a924d..7884ecb 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1,16 +1,13 @@
#include "hdf5.h"
#include "testphdf5.h"
-#include "H5Dprivate.h" /* For Chunk tests */
+#include "H5Dprivate.h" /* For Chunk tests */
/* FILENAME and filenames must have the same number of names */
-const char *FILENAME[3]={ "bigio_test.h5",
- "single_rank_independent_io.h5",
- NULL
- };
+const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL};
/* Constants definitions */
-#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
/* Define some handy debugging shorthands, routines, ... */
/* debugging tools */
@@ -18,70 +15,65 @@ const char *FILENAME[3]={ "bigio_test.h5",
#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
/* Constants definitions */
-#define RANK 2
+#define RANK 2
#define IN_ORDER 1
#define OUT_OF_ORDER 2
-#define DATASET1 "DSET1"
-#define DATASET2 "DSET2"
-#define DATASET3 "DSET3"
-#define DATASET4 "DSET4"
-#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DATASET1 "DSET1"
+#define DATASET2 "DSET2"
+#define DATASET3 "DSET3"
+#define DATASET4 "DSET4"
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
-#define DXFER_BIGCOUNT (1 < 29)
-#define LARGE_DIM 1610612736
+#define DXFER_BIGCOUNT (1 < 29)
+#define LARGE_DIM 1610612736
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* Dataset data type. Int's can be easily octo dumped. */
typedef hsize_t B_DATATYPE;
-int facc_type = FACC_MPIO; /*Test file access type */
-int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-size_t bigcount = (size_t)DXFER_BIGCOUNT;
-int nerrors = 0;
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+size_t bigcount = (size_t)DXFER_BIGCOUNT;
+int nerrors = 0;
static int mpi_size_g, mpi_rank_g;
hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
hsize_t space_dim2 = SPACE_DIM2;
-static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
- int api_option, int file_selection, int mem_selection, int mode);
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
/*
* Setup the coordinates for point selection.
*/
static void
-set_coords(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- size_t num_points,
- hsize_t coords[],
- int order)
+set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
{
- hsize_t i,j, k = 0, m ,n, s1 ,s2;
+ hsize_t i, j, k = 0, m, n, s1, s2;
- if(OUT_OF_ORDER == order)
+ if (OUT_OF_ORDER == order)
k = (num_points * RANK) - 1;
- else if(IN_ORDER == order)
+ else if (IN_ORDER == order)
k = 0;
s1 = start[0];
s2 = start[1];
- for(i = 0 ; i < count[0]; i++)
- for(j = 0 ; j < count[1]; j++)
- for(m = 0 ; m < block[0]; m++)
- for(n = 0 ; n < block[1]; n++)
- if(OUT_OF_ORDER == order) {
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
coords[k--] = s2 + (stride[1] * j) + n;
coords[k--] = s1 + (stride[0] * i) + m;
}
- else if(IN_ORDER == order) {
+ else if (IN_ORDER == order) {
coords[k++] = s1 + stride[0] * i + m;
coords[k++] = s2 + stride[1] * j + n;
}
@@ -92,67 +84,62 @@ set_coords(hsize_t start[],
* Assume dimension rank is 2 and data is stored contiguous.
*/
static void
-fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
+fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
{
B_DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* put some trivial data in the data_array */
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
}
}
/*
* Setup the coordinates for point selection.
*/
-void point_set(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- size_t num_points,
- hsize_t coords[],
- int order)
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
{
- hsize_t i,j, k = 0, m ,n, s1 ,s2;
+ hsize_t i, j, k = 0, m, n, s1, s2;
HDcompile_assert(RANK == 2);
- if(OUT_OF_ORDER == order)
+ if (OUT_OF_ORDER == order)
k = (num_points * RANK) - 1;
- else if(IN_ORDER == order)
+ else if (IN_ORDER == order)
k = 0;
s1 = start[0];
s2 = start[1];
- for(i = 0 ; i < count[0]; i++)
- for(j = 0 ; j < count[1]; j++)
- for(m = 0 ; m < block[0]; m++)
- for(n = 0 ; n < block[1]; n++)
- if(OUT_OF_ORDER == order) {
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
coords[k--] = s2 + (stride[1] * j) + n;
coords[k--] = s1 + (stride[0] * i) + m;
}
- else if(IN_ORDER == order) {
+ else if (IN_ORDER == order) {
coords[k++] = s1 + stride[0] * i + m;
coords[k++] = s2 + stride[1] * j + n;
}
- if(VERBOSE_MED) {
+ if (VERBOSE_MED) {
HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "total datapoints=%" PRIuHSIZE "\n",
- start[0], start[1], count[0], count[1],
- stride[0], stride[1], block[0], block[1],
- block[0] * block[1] * count[0] * count[1]);
+ "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "total datapoints=%" PRIuHSIZE "\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+ block[0] * block[1] * count[0] * count[1]);
k = 0;
- for(i = 0; i < num_points ; i++) {
+ for (i = 0; i < num_points; i++) {
HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
@@ -163,47 +150,46 @@ void point_set(hsize_t start[],
* Print the content of the dataset.
*/
static void
-dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
+dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
{
B_DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
HDprintf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
+ for (j = 0; j < block[1]; j++) {
HDprintf("%3" PRIuHSIZE " ", start[1] + j);
}
HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
- for (j=0; j < block[1]; j++){
- HDprintf("%" PRIuHSIZE " ", *dataptr++);
- }
- HDprintf("\n");
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%" PRIuHSIZE " ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
-
/*
* Print the content of the dataset.
*/
static int
-verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset, B_DATATYPE *original)
+verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset,
+ B_DATATYPE *original)
{
hsize_t i, j;
- int vrfyerrs;
+ int vrfyerrs;
/* print it if VERBOSE_MED */
- if(VERBOSE_MED) {
+ if (VERBOSE_MED) {
HDprintf("verify_data dumping:::\n");
HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
- "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
- start[0], start[1], count[0], count[1],
- stride[0], stride[1], block[0], block[1]);
+ "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
HDprintf("original values:\n");
dataset_print(start, block, original);
HDprintf("compared values:\n");
@@ -211,149 +197,141 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
}
vrfyerrs = 0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
- "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
- "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
- i, j,
- i + start[0], j + start[1],
- *(original), *(dataset));
- }
- dataset++;
- original++;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+ "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+ "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+ i, j, i + start[0], j + start[1], *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
}
}
- }
- if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
HDprintf("[more errors ...]\n");
- if(vrfyerrs)
+ if (vrfyerrs)
HDprintf("%d errors found in verify_data\n", vrfyerrs);
- return(vrfyerrs);
+ return (vrfyerrs);
}
/* Set up the selection */
static void
-ccslab_set(int mpi_rank,
- int mpi_size,
- hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- int mode)
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
{
- switch (mode){
-
- case BYROW_CONT:
- /* Each process takes a slabs of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = space_dim1;
- count[1] = space_dim2;
- start[0] = (hsize_t)mpi_rank*count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = space_dim1/(stride[0]*block[0]);
- count[1] = (space_dim2)/(stride[1]*block[1]);
- start[0] = space_dim1*(hsize_t)mpi_rank;
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTNONE:
- /* Each process takes a slabs of rows, there are
- no selections for the last process. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
- count[1] = space_dim2;
- start[0] = (hsize_t)mpi_rank*count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTUNBALANCE:
- /* The first one-third of the number of processes only
- select top half of the domain, The rest will select the bottom
- half of the domain. */
-
- block[0] = 1;
- count[0] = 2;
- stride[0] = (hsize_t)(space_dim1*(hsize_t)mpi_size/4+1);
- block[1] = space_dim2;
- count[1] = 1;
- start[1] = 0;
- stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = (hsize_t)mpi_rank;
- else start[0] = 1 + space_dim1*(hsize_t)mpi_size/2 + (hsize_t)(mpi_rank-2*mpi_size/3);
- break;
-
- case BYROW_SELECTINCHUNK:
- /* Each process will only select one chunk */
-
- block[0] = 1;
- count[0] = 1;
- start[0] = (hsize_t)mpi_rank*space_dim1;
- stride[0]= 1;
- block[1] = space_dim2;
- count[1] = 1;
- stride[1]= 1;
- start[1] = 0;
-
- break;
-
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = space_dim1*(hsize_t)mpi_size;
- block[1] = space_dim2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
+ switch (mode) {
+
+ case BYROW_CONT:
+ /* Each process takes a slabs of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1 / (stride[0] * block[0]);
+ count[1] = (space_dim2) / (stride[1] * block[1]);
+ start[0] = space_dim1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+ /* Each process takes a slabs of rows, there are
+ no selections for the last process. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1);
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+ /* The first one-third of the number of processes only
+ select top half of the domain, The rest will select the bottom
+ half of the domain. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1);
+ block[1] = space_dim2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3);
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)mpi_rank * space_dim1;
+ stride[0] = 1;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1 * (hsize_t)mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
}
- if (VERBOSE_MED){
- HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
}
}
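In the BYROW_CONT branch above, each rank selects a contiguous band of space_dim1 rows starting at row mpi_rank * space_dim1 (block and stride are 1, so count is simply the element count), and the bands tile the first dimension without overlap; with space_dim1 = 4096 and four ranks, rank 1 would cover rows 4096 through 8191. A sketch of that one branch applied to an existing file dataspace (fspace and the dimension arguments are assumed, not taken from the test):

    #include "hdf5.h"

    /* Sketch: BYROW_CONT, one contiguous band of rows per rank. */
    static herr_t
    select_byrow_cont(hid_t fspace, int mpi_rank, hsize_t space_dim1, hsize_t space_dim2)
    {
        hsize_t block[2]  = {1, 1};
        hsize_t stride[2] = {1, 1};
        hsize_t count[2]  = {space_dim1, space_dim2};
        hsize_t start[2]  = {(hsize_t)mpi_rank * space_dim1, 0};

        return H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
    }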
-
/*
* Fill the dataset with trivial data for testing.
* Assume dimension rank is 2.
*/
static void
-ccdataset_fill(hsize_t start[],
- hsize_t stride[],
- hsize_t count[],
- hsize_t block[],
- DATATYPE * dataset,
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
int mem_selection)
{
DATATYPE *dataptr = dataset;
DATATYPE *tmptr;
- hsize_t i,j,k1,k2,k=0;
+ hsize_t i, j, k1, k2, k = 0;
/* put some trivial data in the data_array */
tmptr = dataptr;
@@ -361,23 +339,23 @@ ccdataset_fill(hsize_t start[],
through the pointer */
for (k1 = 0; k1 < count[0]; k1++) {
- for(i = 0; i < block[0]; i++) {
- for(k2 = 0; k2 < count[1]; k2++) {
- for(j = 0;j < block[1]; j++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
- if (ALL != mem_selection) {
- dataptr = tmptr + ((start[0]+k1*stride[0]+i)*space_dim2+
- start[1]+k2*stride[1]+j);
- }
- else {
- dataptr = tmptr + k;
- k++;
- }
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
- *dataptr = (DATATYPE)(k1+k2+i+j);
- }
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
}
- }
}
}
@@ -385,26 +363,24 @@ ccdataset_fill(hsize_t start[],
* Print the first block of the content of the dataset.
*/
static void
-ccdataset_print(hsize_t start[],
- hsize_t block[],
- DATATYPE * dataset)
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
HDprintf("Print only the first block of the dataset\n");
HDprintf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
- HDprintf("%3lu ", (unsigned long)(start[1]+j));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
}
HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
HDprintf("%03d ", *dataptr++);
}
HDprintf("\n");
@@ -415,24 +391,20 @@ ccdataset_print(hsize_t start[],
* Print the content of the dataset.
*/
static int
-ccdataset_vrfy(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- DATATYPE *dataset,
- DATATYPE *original,
- int mem_selection)
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
{
- hsize_t i, j,k1,k2,k=0;
- int vrfyerrs;
- DATATYPE *dataptr,*oriptr;
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
HDprintf("dataset_vrfy dumping:::\n");
HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
HDprintf("original values:\n");
ccdataset_print(start, block, original);
HDprintf("compared values:\n");
@@ -441,26 +413,25 @@ ccdataset_vrfy(hsize_t start[],
vrfyerrs = 0;
- for (k1=0;k1<count[0];k1++) {
- for(i=0;i<block[0];i++) {
- for(k2=0; k2<count[1];k2++) {
- for(j=0;j<block[1];j++) {
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
if (ALL != mem_selection) {
- dataptr = dataset + ((start[0]+k1*stride[0]+i)*space_dim2+
- start[1]+k2*stride[1]+j);
- oriptr = original + ((start[0]+k1*stride[0]+i)*space_dim2+
- start[1]+k2*stride[1]+j);
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
}
else {
dataptr = dataset + k;
- oriptr = original + k;
+ oriptr = original + k;
k++;
}
- if (*dataptr != *oriptr){
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- *(oriptr), *(dataptr));
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
}
}
}
@@ -471,7 +442,7 @@ ccdataset_vrfy(hsize_t start[],
HDprintf("[more errors ...]\n");
if (vrfyerrs)
HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
- return(vrfyerrs);
+ return (vrfyerrs);
}
/*
@@ -487,29 +458,28 @@ static void
dataset_big_write(void)
{
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK],stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- size_t num_points;
- B_DATATYPE * wdata;
-
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t * coords = NULL;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ size_t num_points;
+ B_DATATYPE *wdata;
/* allocate memory for data buffer */
- wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
VRFY_G((wdata != NULL), "wdata malloc succeeded");
/* setup file access template */
- acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
@@ -521,7 +491,6 @@ dataset_big_write(void)
ret = H5Pclose(acc_tpl);
VRFY_G((ret >= 0), "");
-
    /* Each process takes a slab of rows. */
if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset1 write by ROW\n");
@@ -529,51 +498,50 @@ dataset_big_write(void)
dims[0] = bigcount;
dims[1] = (hsize_t)mpi_size_g;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
- block[0] = dims[0]/(hsize_t)mpi_size_g;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank_g*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, wdata);
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
@@ -584,7 +552,6 @@ dataset_big_write(void)
ret = H5Dclose(dataset);
VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
    /* Each process takes a slab of cols. */
if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset2 write by COL\n");
@@ -592,51 +559,50 @@ dataset_big_write(void)
dims[0] = bigcount;
dims[1] = (hsize_t)mpi_size_g;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
- block[0] = dims[0];
- block[1] = dims[1]/(hsize_t)mpi_size_g;
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank_g*block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, wdata);
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
@@ -647,8 +613,6 @@ dataset_big_write(void)
ret = H5Dclose(dataset);
VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
-
/* ALL selection */
if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
@@ -656,16 +620,16 @@ dataset_big_write(void)
dims[0] = bigcount;
dims[1] = 1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- if(mpi_rank_g == 0) {
+ if (mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
@@ -675,32 +639,31 @@ dataset_big_write(void)
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
VRFY_G((mem_dataspace >= 0), "");
- if(mpi_rank_g != 0) {
+ if (mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
/* fill the local slab with some trivial data */
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
}
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, wdata);
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
@@ -718,38 +681,39 @@ dataset_big_write(void)
dims[0] = bigcount;
dims[1] = (hsize_t)(mpi_size_g * 4);
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
- block[0] = dims[0]/2;
- block[1] = 2;
- stride[0] = dims[0]/2;
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
stride[1] = 2;
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
num_points = bigcount;
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
VRFY_G((coords != NULL), "coords malloc succeeded");
- set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
- if(coords) free(coords);
+ if (coords)
+ free(coords);
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
dataset_print(start, block, wdata);
}
@@ -759,21 +723,20 @@ dataset_big_write(void)
     * even if we only pass a single value. Attempting anything else
* appears to cause problems with 32 bit compilers.
*/
- mem_dataspace = H5Screate_simple (1, dims, NULL);
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, wdata);
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
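Every write and read in these hunks goes through the same transfer-property recipe: create a dataset-transfer property list, request collective MPI-IO, and optionally downgrade to independent I/O issued collectively (the DXFER_INDEPENDENT_IO path). A condensed sketch of that recipe, with error checks omitted for brevity:

    #include "hdf5.h"

    /* Sketch: build the transfer property list used for the big collective I/O. */
    static hid_t
    make_xfer_plist(int use_independent)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);     /* collective transfer mode */
        if (use_independent)                              /* independent I/O, issued collectively */
            H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

        return dxpl; /* passed to H5Dwrite()/H5Dread() as the dxpl argument */
    }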
@@ -800,37 +763,37 @@ dataset_big_write(void)
static void
dataset_big_read(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset;
- B_DATATYPE *rdata = NULL; /* data buffer */
- B_DATATYPE *wdata = NULL; /* expected data buffer */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points;
- hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ size_t num_points;
+ hsize_t * coords = NULL;
+ herr_t ret; /* Generic return value */
/* allocate memory for data buffer */
- rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ rdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
VRFY_G((rdata != NULL), "rdata malloc succeeded");
- wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
VRFY_G((wdata != NULL), "wdata malloc succeeded");
- HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
/* setup file access template */
- acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* open the file collectively */
- fid=H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl);
VRFY_G((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
@@ -846,50 +809,52 @@ dataset_big_read(void)
dims[0] = bigcount;
dims[1] = (hsize_t)mpi_size_g;
    /* Each process takes a slab of cols. */
- block[0] = dims[0];
- block[1] = dims[1]/(hsize_t)mpi_size_g;
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank_g*block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, rdata);
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -898,60 +863,61 @@ dataset_big_read(void)
ret = H5Dclose(dataset);
VRFY_G((ret >= 0), "H5Dclose1 succeeded");
-
if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset2 by ROW\n");
- HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
dims[1] = (hsize_t)mpi_size_g;
    /* Each process takes a slab of rows. */
- block[0] = dims[0]/(hsize_t)mpi_size_g;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank_g*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, rdata);
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -962,7 +928,7 @@ dataset_big_read(void)
if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
- HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
@@ -970,9 +936,9 @@ dataset_big_read(void)
dims[1] = 1;
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
- if(mpi_rank_g == 0) {
+ if (mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
@@ -982,9 +948,9 @@ dataset_big_read(void)
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
VRFY_G((mem_dataspace >= 0), "");
- if(mpi_rank_g != 0) {
+ if (mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
@@ -992,29 +958,31 @@ dataset_big_read(void)
/* fill dataset with test data */
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, rdata);
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
- if(mpi_rank_g == 0) {
+ if (mpi_rank_g == 0) {
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
}
/* release all temporary handles. */
@@ -1032,18 +1000,18 @@ dataset_big_read(void)
dims[0] = bigcount;
dims[1] = (hsize_t)(mpi_size_g * 4);
- block[0] = dims[0]/2;
- block[1] = 2;
- stride[0] = dims[0]/2;
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
stride[1] = 2;
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
fill_datasets(start, block, wdata);
MESG("data_array initialized");
- if(VERBOSE_MED){
+ if (VERBOSE_MED) {
MESG("data_array created");
dataset_print(start, block, wdata);
}
@@ -1053,40 +1021,43 @@ dataset_big_read(void)
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
VRFY_G((coords != NULL), "coords malloc succeeded");
- set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
- file_dataspace = H5Dget_space (dataset);
+ file_dataspace = H5Dget_space(dataset);
VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
- if(coords) HDfree(coords);
+ if (coords)
+ HDfree(coords);
/* create a memory dataspace */
/* Warning: H5Screate_simple requires an array of hsize_t elements
     * even if we only pass a single value. Attempting anything else
* appears to cause problems with 32 bit compilers.
*/
- mem_dataspace = H5Screate_simple (1, dims, NULL);
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
VRFY_G((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, rdata);
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -1104,14 +1075,17 @@ dataset_big_read(void)
* expect to read it...
*/
file_dataspace = -1;
- mem_dataspace = -1;
- xfer_plist = -1;
- dataset = -1;
+ mem_dataspace = -1;
+ xfer_plist = -1;
+ dataset = -1;
/* release all temporary handles. */
- if (file_dataspace != -1) H5Sclose(file_dataspace);
- if (mem_dataspace != -1) H5Sclose(mem_dataspace);
- if (xfer_plist != -1) H5Pclose(xfer_plist);
+ if (file_dataspace != -1)
+ H5Sclose(file_dataspace);
+ if (mem_dataspace != -1)
+ H5Sclose(mem_dataspace);
+ if (xfer_plist != -1)
+ H5Pclose(xfer_plist);
if (dataset != -1) {
ret = H5Dclose(dataset);
VRFY_G((ret >= 0), "H5Dclose1 succeeded");
@@ -1119,8 +1093,10 @@ dataset_big_read(void)
H5Fclose(fid);
/* release data buffers */
- if(rdata) HDfree(rdata);
- if(wdata) HDfree(wdata);
+ if (rdata)
+ HDfree(rdata);
+ if (wdata)
+ HDfree(wdata);
} /* dataset_big_read */
@@ -1131,13 +1107,13 @@ single_rank_independent_io(void)
HDprintf("single_rank_independent_io\n");
if (MAIN_PROCESS) {
- hsize_t dims[] = { LARGE_DIM };
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t fspace_id = -1;
- hid_t mspace_id = -1;
- void *data = NULL;
+ hsize_t dims[] = {LARGE_DIM};
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ hid_t mspace_id = -1;
+ void * data = NULL;
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
@@ -1152,8 +1128,8 @@ single_rank_independent_io(void)
/*
* Create and write to a >2GB dataset from a single rank.
*/
- dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
@@ -1164,7 +1140,7 @@ single_rank_independent_io(void)
else
H5Sselect_none(fspace_id);
- dims[0] = LARGE_DIM;
+ dims[0] = LARGE_DIM;
mspace_id = H5Screate_simple(1, dims, NULL);
VRFY_G((mspace_id >= 0), "H5Screate_simple mspace_id succeeded");
H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
@@ -1177,7 +1153,6 @@ single_rank_independent_io(void)
H5Fclose(file_id);
HDremove(FILENAME[1]);
-
}
MPI_Barrier(MPI_COMM_WORLD);
}
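
One pattern worth noting in the hunk above is that a rank which should not take part in an independent write simply selects nothing. A reduced, hypothetical sketch (participate, dset, fspace, mspace, and buf are illustrative names, not handles from this file):

/* sketch: opt a rank out of an independent (H5P_DEFAULT) write */
if (participate) {
    H5Sselect_all(fspace);
    H5Sselect_all(mspace);
}
else {
    H5Sselect_none(fspace);
    H5Sselect_none(mspace); /* selected element counts must match on both dataspaces */
}
H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);
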
@@ -1188,54 +1163,53 @@ single_rank_independent_io(void)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
- if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY_G((ret >= 0), "");
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY_G((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY_G((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY_G((ret >= 0), "");
- return(ret_pl);
+ VRFY_G((ret >= 0), "");
+ return (ret_pl);
}
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY_G((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY_G((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY_G((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
}
/* unknown file access types */
return (ret_pl);
}
-
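
A hedged usage sketch for the helper above: the returned file access property list is handed to H5Fcreate()/H5Fopen() and released once the file is open (the file name here is illustrative):

hid_t acc_tpl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
hid_t fid     = H5Fcreate("example_parallel.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);

H5Pclose(acc_tpl); /* the FAPL is no longer needed once the file is open */
/* ... collective dataset work ... */
H5Fclose(fid);
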
/*-------------------------------------------------------------------------
* Function: coll_chunk1
*
@@ -1289,7 +1263,6 @@ coll_chunk1(void)
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
-
/*-------------------------------------------------------------------------
* Function: coll_chunk2
*
@@ -1308,7 +1281,7 @@ coll_chunk1(void)
*-------------------------------------------------------------------------
*/
- /* ------------------------------------------------------------------------
+/* ------------------------------------------------------------------------
* Descriptions for the selection: many disjoint selections inside one chunk
* Two dimensions,
*
@@ -1343,7 +1316,6 @@ coll_chunk2(void)
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
}
-
/*-------------------------------------------------------------------------
* Function: coll_chunk3
*
@@ -1398,7 +1370,6 @@ coll_chunk3(void)
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
-
//-------------------------------------------------------------------------
// Borrowed/Modified (slightly) from t_coll_chunk.c
/*-------------------------------------------------------------------------
@@ -1429,454 +1400,450 @@ coll_chunk3(void)
*/
static void
-coll_chunktest(const char* filename,
- int chunk_factor,
- int select_factor,
- int api_option,
- int file_selection,
- int mem_selection,
- int mode)
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
{
- hid_t file, dataset, file_dataspace, mem_dataspace;
- hid_t acc_plist,xfer_plist,crp_plist;
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
- hsize_t dims[RANK], chunk_dims[RANK];
- int* data_array1 = NULL;
- int* data_origin1 = NULL;
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int * data_array1 = NULL;
+ int * data_origin1 = NULL;
- hsize_t start[RANK],count[RANK],stride[RANK],block[RANK];
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- unsigned prop_value;
+ unsigned prop_value;
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- herr_t status;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
-
- /* Create the data space */
-
- acc_plist = create_faccess_plist(comm,info,facc_type);
- VRFY_G((acc_plist >= 0),"");
-
- file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
- VRFY_G((file >= 0),"H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY_G((status >= 0),"");
-
- /* setup dimensionality object */
- dims[0] = space_dim1*(hsize_t)mpi_size_g;
- dims[1] = space_dim2;
-
- /* allocate memory for data buffer */
- data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
- VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
-
- /* set up the coords array selection */
- num_points = block[0] * block[1] * count[0] * count[1];
- coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY_G((coords != NULL), "coords malloc succeeded");
- point_set(start, count, stride, block, num_points, coords, mode);
-
- /* Warning: H5Screate_simple requires an array of hsize_t elements
- * even if we only pass only a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
-
- if(ALL != mem_selection) {
- mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
- }
- else {
- /* Putting the warning about H5Screate_simple (above) into practice... */
- hsize_t dsdims[1] = {num_points};
- mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY_G((crp_plist >= 0),"");
-
- /* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)chunk_factor;
-
- /* to decrease the testing time, maintain bigger chunk size */
- (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2/2);
- status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY_G((status >= 0),"chunk creation property list succeeded");
-
- dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
- file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
- VRFY_G((dataset >= 0),"dataset created succeeded");
-
- status = H5Pclose(crp_plist);
- VRFY_G((status >= 0), "");
-
- /*put some trivial data in the data array */
- ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
-
- MESG("data_array initialized");
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY_G((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY_G((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* set up the collective transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((status>= 0),"set independent IO collectively succeeded");
- }
-
- switch(api_option){
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
- VRFY_G((status>= 0),"collective chunk optimization succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY_G((status>= 0),"collective chunk optimization succeeded ");
- break;
-
- case API_LINK_TRUE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
- VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
- VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
- VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
- VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
-
- default:
- ;
- }
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY_G((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = space_dim1 * (hsize_t)mpi_size_g;
+ dims[1] = space_dim2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we only pass only a single value. Attempting anything else
+ * appears to cause problems with 32 bit compilers.
+ */
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ /* Putting the warning about H5Screate_simple (above) into practice... */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY_G((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+ /* to decrease the testing time, maintain bigger chunk size */
+ (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY_G((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY_G((status >= 0), "");
+
+ /*put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if(facc_type == FACC_MPIO) {
- switch(api_option) {
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
case API_LINK_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
case API_LINK_TRUE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
case API_LINK_FALSE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_COLL:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_IND:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY_G((status >= 0),"testing property list inserted succeeded");
- break;
-
- default:
- ;
- }
- }
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
#endif
- /* write data collectively */
- status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
- VRFY_G((status >= 0),"dataset write succeeded");
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if(facc_type == FACC_MPIO) {
- switch(api_option){
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
case API_LINK_HARD:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
case API_MULTI_HARD:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
case API_LINK_TRUE:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
case API_LINK_FALSE:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
case API_MULTI_COLL:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
case API_MULTI_IND:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
- VRFY_G((status >= 0),"testing property list get succeeded");
- VRFY_G((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
- break;
-
- default:
- ;
- }
- }
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
#endif
- status = H5Dclose(dataset);
- VRFY_G((status >= 0),"");
-
- status = H5Pclose(xfer_plist);
- VRFY_G((status >= 0),"property list closed");
-
- status = H5Sclose(file_dataspace);
- VRFY_G((status >= 0),"");
-
- status = H5Sclose(mem_dataspace);
- VRFY_G((status >= 0),"");
-
-
- status = H5Fclose(file);
- VRFY_G((status >= 0),"");
-
- if (data_array1) HDfree(data_array1);
-
- /* Use collective read to verify the correctness of collective write. */
-
- /* allocate memory for data buffer */
- data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* allocate memory for data buffer */
- data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY_G((acc_plist >= 0),"MPIO creation property list succeeded");
-
- file = H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_plist);
- VRFY_G((file >= 0),"H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY_G((status >= 0),"");
-
- /* open the collective dataset*/
- dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY_G((dataset >= 0), "");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
-
- /* obtain the file and mem dataspace*/
- file_dataspace = H5Dget_space (dataset);
- VRFY_G((file_dataspace >= 0), "");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Dget_space (dataset);
- VRFY_G((mem_dataspace >= 0), "");
- }
- else {
- /* Warning: H5Screate_simple requires an array of hsize_t elements
- * even if we only pass only a single value. Attempting anything else
- * appears to cause problems with 32 bit compilers.
- */
- hsize_t dsdims[1] = {num_points};
- mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY_G((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY_G((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY_G((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY_G((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY_G((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* fill dataset with test data */
- ccdataset_fill(start, stride,count,block, data_origin1, mem_selection);
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY_G((xfer_plist >= 0),"");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY_G((status>= 0),"set independent IO collectively succeeded");
- }
-
- status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
- VRFY_G((status >=0),"dataset read succeeded");
-
- /* verify the read data with original expected data */
- status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
- if (status) nerrors++;
-
- status = H5Pclose(xfer_plist);
- VRFY_G((status >= 0),"property list closed");
-
- /* close dataset collectively */
- status=H5Dclose(dataset);
- VRFY_G((status >= 0), "H5Dclose");
-
- /* release all IDs created */
- status = H5Sclose(file_dataspace);
- VRFY_G((status >= 0),"H5Sclose");
-
- status = H5Sclose(mem_dataspace);
- VRFY_G((status >= 0),"H5Sclose");
-
- /* close the file collectively */
- status = H5Fclose(file);
- VRFY_G((status >= 0),"H5Fclose");
-
- /* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "");
-}
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "");
+ if (data_array1)
+ HDfree(data_array1);
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist);
+ VRFY_G((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* open the collective dataset*/
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* obtain the file and mem dataspace*/
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY_G((mem_dataspace >= 0), "");
+ }
+ else {
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+ * even if we only pass only a single value. Attempting anything else
+ * appears to cause problems with 32 bit compilers.
+ */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* fill dataset with test data */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
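
For quick reference, the transfer-property calls exercised by coll_chunktest() reduce to the sketch below; which optimization routine is used depends on the api_option argument, and the threshold values shown are illustrative:

hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

/* either force link-chunk ("one I/O") or multi-chunk collective I/O ... */
H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_ONE_IO);
/* ... or steer the link- vs. multi-chunk decision through thresholds */
H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 2);    /* chunks-per-process threshold */
H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 50); /* percent of processes per chunk */

H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, dxpl, data_array1);
H5Pclose(dxpl);
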
/*****************************************************************************
*
@@ -1907,31 +1874,26 @@ do_express_test(int world_mpi_rank)
express_test = GetTestExpress();
- result = MPI_Allreduce((void *)&express_test,
- (void *)&max_express_test,
- 1,
- MPI_INT,
- MPI_MAX,
- MPI_COMM_WORLD);
+ result =
+ MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
max_express_test = -1;
- if ( VERBOSE_MED && (world_mpi_rank == 0)) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
- world_mpi_rank, FUNC );
+ if (VERBOSE_MED && (world_mpi_rank == 0)) {
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
}
}
- return(max_express_test);
+ return (max_express_test);
} /* do_express_test() */
-
-int main(int argc, char **argv)
+int
+main(int argc, char **argv)
{
- int ExpressMode = 0;
- hsize_t newsize = 1048576;
+ int ExpressMode = 0;
+ hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
@@ -1941,20 +1903,20 @@ int main(int argc, char **argv)
     * invoked and tested.
*/
if (newsize != oldsize) {
- bigcount = newsize * 2;
+ bigcount = newsize * 2;
}
MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size_g);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank_g);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);
/* Attempt to turn off atexit post processing so that in case errors
* happen during the test and the process is aborted, it will not get
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0){
- HDprintf("Failed to turn off atexit processing. Continue.\n");
+ if (H5dont_atexit() < 0) {
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
};
/* set alarm. */
@@ -1969,17 +1931,17 @@ int main(int argc, char **argv)
MPI_Barrier(MPI_COMM_WORLD);
if (ExpressMode > 0) {
- if (mpi_rank_g == 0)
- HDprintf("***Express test mode on. Several tests are skipped\n");
+ if (mpi_rank_g == 0)
+ HDprintf("***Express test mode on. Several tests are skipped\n");
}
else {
- coll_chunk1();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk2();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk3();
- MPI_Barrier(MPI_COMM_WORLD);
- single_rank_independent_io();
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+ single_rank_independent_io();
}
/* turn off alarm */
@@ -1995,4 +1957,3 @@ int main(int argc, char **argv)
return 0;
}
-
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 8315c5b..e0e03f2 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -18,9 +18,9 @@
#include "testpar.h"
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#include "H5ACpkg.h"
#include "H5Cpkg.h"
@@ -30,58 +30,53 @@
#include "H5MFprivate.h"
#include "H5private.h"
-#define BASE_ADDR (haddr_t)1024
+#define BASE_ADDR (haddr_t)1024
-
-int nerrors = 0;
-int failures = 0;
-hbool_t verbose = TRUE; /* used to control error messages */
+int nerrors = 0;
+int failures = 0;
+hbool_t verbose = TRUE; /* used to control error messages */
#define NFILENAME 2
-const char *FILENAME[NFILENAME]={"CacheTestDummy", NULL};
+const char *FILENAME[NFILENAME] = {"CacheTestDummy", NULL};
#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
-haddr_t max_addr = 0; /* used to store the end of
- * the address space used by
- * the data array (see below).
- */
-hbool_t callbacks_verbose = FALSE; /* flag used to control whether
- * the callback functions are in
- * verbose mode.
- */
-
-
-int world_mpi_size = -1;
-int world_mpi_rank = -1;
-int world_server_mpi_rank = -1;
-MPI_Comm world_mpi_comm = MPI_COMM_NULL;
-int file_mpi_size = -1;
-int file_mpi_rank = -1;
-MPI_Comm file_mpi_comm = MPI_COMM_NULL;
-
+hid_t fapl; /* file access property list */
+haddr_t max_addr = 0; /* used to store the end of
+ * the address space used by
+ * the data array (see below).
+ */
+hbool_t callbacks_verbose = FALSE; /* flag used to control whether
+ * the callback functions are in
+ * verbose mode.
+ */
+
+int world_mpi_size = -1;
+int world_mpi_rank = -1;
+int world_server_mpi_rank = -1;
+MPI_Comm world_mpi_comm = MPI_COMM_NULL;
+int file_mpi_size = -1;
+int file_mpi_rank = -1;
+MPI_Comm file_mpi_comm = MPI_COMM_NULL;
 /* the following globals are used to maintain rudimentary statistics
* to check the validity of the statistics maintained by H5C.c
*/
-long datum_clears = 0;
-long datum_pinned_clears = 0;
-long datum_destroys = 0;
-long datum_flushes = 0;
-long datum_pinned_flushes = 0;
-long datum_loads = 0;
-long global_pins = 0;
-long global_dirty_pins = 0;
+long datum_clears = 0;
+long datum_pinned_clears = 0;
+long datum_destroys = 0;
+long datum_flushes = 0;
+long datum_pinned_flushes = 0;
+long datum_loads = 0;
+long global_pins = 0;
+long global_dirty_pins = 0;
long local_pins = 0;
-
/* the following fields are used by the server process only */
-int total_reads = 0;
-int total_writes = 0;
-
+int total_reads = 0;
+int total_writes = 0;
/*****************************************************************************
* struct datum
@@ -161,24 +156,23 @@ int total_writes = 0;
*
*****************************************************************************/
-struct datum
-{
- H5C_cache_entry_t header;
- haddr_t base_addr;
- size_t len;
- size_t local_len;
- int ver;
- hbool_t dirty;
- hbool_t valid;
- hbool_t locked;
- hbool_t global_pinned;
- hbool_t local_pinned;
- hbool_t cleared;
- hbool_t flushed;
- int reads;
- int writes;
- int index;
- struct H5AC_aux_t * aux_ptr;
+struct datum {
+ H5C_cache_entry_t header;
+ haddr_t base_addr;
+ size_t len;
+ size_t local_len;
+ int ver;
+ hbool_t dirty;
+ hbool_t valid;
+ hbool_t locked;
+ hbool_t global_pinned;
+ hbool_t local_pinned;
+ hbool_t cleared;
+ hbool_t flushed;
+ int reads;
+ int writes;
+ int index;
+ struct H5AC_aux_t *aux_ptr;
};
/*****************************************************************************
@@ -196,11 +190,10 @@ struct datum
*
*****************************************************************************/
-#define NUM_DATA_ENTRIES 100000
+#define NUM_DATA_ENTRIES 100000
struct datum data[NUM_DATA_ENTRIES];
-
 /* Many tests use the size of the data array as the size of test loops.
* On some machines, this results in unacceptably long test runs.
*
@@ -215,16 +208,15 @@ struct datum data[NUM_DATA_ENTRIES];
 * Further, this value must be consistent across all processes.
*/
-#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
-#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
+#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
+#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
/* Use a smaller test size to avoid creating huge MPE logfiles. */
#ifdef H5_HAVE_MPE
-#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
#endif
int virt_num_data_entries = NUM_DATA_ENTRIES;
-
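
A hedged sketch of how the knob above is typically scaled down once the express-test level is known (the express_test variable is illustrative here; the real selection happens in the test's main()):

if (express_test > 0)
    virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
else
    virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
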
/*****************************************************************************
* data_index array
*
@@ -241,7 +233,6 @@ int virt_num_data_entries = NUM_DATA_ENTRIES;
int data_index[NUM_DATA_ENTRIES];
-
/*****************************************************************************
* The following two #defines are used to control code that is in turn used
* to force "POSIX" semantics on the server process used to simulate metadata
@@ -271,8 +262,7 @@ int data_index[NUM_DATA_ENTRIES];
*****************************************************************************/
#define DO_WRITE_REQ_ACK TRUE
-#define DO_SYNC_AFTER_WRITE FALSE
-
+#define DO_SYNC_AFTER_WRITE FALSE
/*****************************************************************************
* struct mssg
@@ -302,42 +292,40 @@ int data_index[NUM_DATA_ENTRIES];
*
*****************************************************************************/
-#define WRITE_REQ_CODE 0
-#define WRITE_REQ_ACK_CODE 1
-#define READ_REQ_CODE 2
-#define READ_REQ_REPLY_CODE 3
-#define SYNC_REQ_CODE 4
-#define SYNC_ACK_CODE 5
-#define REQ_TTL_WRITES_CODE 6
-#define REQ_TTL_WRITES_RPLY_CODE 7
-#define REQ_TTL_READS_CODE 8
+#define WRITE_REQ_CODE 0
+#define WRITE_REQ_ACK_CODE 1
+#define READ_REQ_CODE 2
+#define READ_REQ_REPLY_CODE 3
+#define SYNC_REQ_CODE 4
+#define SYNC_ACK_CODE 5
+#define REQ_TTL_WRITES_CODE 6
+#define REQ_TTL_WRITES_RPLY_CODE 7
+#define REQ_TTL_READS_CODE 8
#define REQ_TTL_READS_RPLY_CODE 9
#define REQ_ENTRY_WRITES_CODE 10
-#define REQ_ENTRY_WRITES_RPLY_CODE 11
-#define REQ_ENTRY_READS_CODE 12
-#define REQ_ENTRY_READS_RPLY_CODE 13
-#define REQ_RW_COUNT_RESET_CODE 14
-#define REQ_RW_COUNT_RESET_RPLY_CODE 15
-#define DONE_REQ_CODE 16
-#define MAX_REQ_CODE 16
-
-#define MSSG_MAGIC 0x1248
-
-struct mssg_t
-{
- int req;
- int src;
- int dest;
- long int mssg_num;
- haddr_t base_addr;
- unsigned len;
- int ver;
- unsigned count;
- unsigned magic;
+#define REQ_ENTRY_WRITES_RPLY_CODE 11
+#define REQ_ENTRY_READS_CODE 12
+#define REQ_ENTRY_READS_RPLY_CODE 13
+#define REQ_RW_COUNT_RESET_CODE 14
+#define REQ_RW_COUNT_RESET_RPLY_CODE 15
+#define DONE_REQ_CODE 16
+#define MAX_REQ_CODE 16
+
+#define MSSG_MAGIC 0x1248
+
+struct mssg_t {
+ int req;
+ int src;
+ int dest;
+ long int mssg_num;
+ haddr_t base_addr;
+ unsigned len;
+ int ver;
+ unsigned count;
+ unsigned magic;
};
-MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
-
+MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
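
The mpi_mssg_t handle above is built from struct mssg_t by setup_derived_types() later in this file. As a hedged illustration of the general technique only, a struct with mixed field types can be registered with MPI roughly as follows (small_msg and its three fields are hypothetical, not the test's actual layout):

#include <stddef.h> /* offsetof */
#include <mpi.h>

struct small_msg {
    int      req;
    long int mssg_num;
    unsigned magic;
};

static MPI_Datatype
register_small_msg(void)
{
    MPI_Datatype new_type;
    int          lens[3]   = {1, 1, 1};
    MPI_Datatype ftypes[3] = {MPI_INT, MPI_LONG, MPI_UNSIGNED};
    MPI_Aint     offs[3]   = {offsetof(struct small_msg, req), offsetof(struct small_msg, mssg_num),
                              offsetof(struct small_msg, magic)};

    MPI_Type_create_struct(3, lens, offs, ftypes, &new_type);
    MPI_Type_commit(&new_type); /* release with MPI_Type_free() when done */
    return new_type;
}
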
/*****************************************************************************/
/************************** function declarations ****************************/
@@ -351,19 +339,16 @@ static void reset_stats(void);
static hbool_t set_up_file_communicator(void);
-
/* data array manipulation functions */
-static int addr_to_datum_index(haddr_t base_addr);
+static int addr_to_datum_index(haddr_t base_addr);
static void init_data(void);
-
/* test coordination related functions */
-static int do_express_test(void);
+static int do_express_test(void);
static void do_sync(void);
-static int get_max_nerrors(void);
-
+static int get_max_nerrors(void);
/* mssg xfer related functions */
@@ -372,48 +357,37 @@ static hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag);
static hbool_t setup_derived_types(void);
static hbool_t takedown_derived_types(void);
-
/* server functions */
static hbool_t reset_server_counters(void);
static hbool_t server_main(void);
-static hbool_t serve_read_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_sync_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_write_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_total_writes_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_total_reads_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_entry_writes_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_entry_reads_request(struct mssg_t * mssg_ptr);
-static hbool_t serve_rw_count_reset_request(struct mssg_t * mssg_ptr);
-
+static hbool_t serve_read_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_sync_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_write_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_total_writes_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_total_reads_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_entry_writes_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_entry_reads_request(struct mssg_t *mssg_ptr);
+static hbool_t serve_rw_count_reset_request(struct mssg_t *mssg_ptr);
/* call back functions & related data structures */
-static herr_t datum_get_initial_load_size(void *udata_ptr,
- size_t *image_len_ptr);
+static herr_t datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr);
-static void * datum_deserialize(const void * image_ptr,
- size_t len,
- void * udata_ptr,
- hbool_t * dirty_ptr);
+static void *datum_deserialize(const void *image_ptr, size_t len, void *udata_ptr, hbool_t *dirty_ptr);
-static herr_t datum_image_len(const void *thing,
- size_t *image_len_ptr);
+static herr_t datum_image_len(const void *thing, size_t *image_len_ptr);
-static herr_t datum_serialize(const H5F_t *f,
- void *image_ptr,
- size_t len,
- void *thing_ptr);
+static herr_t datum_serialize(const H5F_t *f, void *image_ptr, size_t len, void *thing_ptr);
static herr_t datum_notify(H5C_notify_action_t action, void *thing);
-static herr_t datum_free_icr(void * thing);
+static herr_t datum_free_icr(void *thing);
/* Masquerade as object header entries to the cache */
-#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
-
-#define NUMBER_OF_ENTRY_TYPES 1
+#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
+#define NUMBER_OF_ENTRY_TYPES 1
/* Note the use of the H5AC__CLASS_SKIP_READS and H5AC__CLASS_SKIP_WRITES
* flags. As a result of these flags, the metadata cache does no file I/O
@@ -429,9 +403,7 @@ static herr_t datum_free_icr(void * thing);
*
* JRM -- 1/13/15
*/
-const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
-{
- {
+const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] = {{
/* id */ DATUM_ENTRY_TYPE,
/* name */ "datum",
/* mem_type */ H5FD_MEM_OHDR,
@@ -446,48 +418,38 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
/* notify */ datum_notify,
/* free_icr */ datum_free_icr,
/* fsf_size */ NULL,
- }
-};
-
+}};
/* test utility functions */
-static void expunge_entry(H5F_t * file_ptr, int32_t idx);
-static void insert_entry(H5C_t * cache_ptr, H5F_t * file_ptr,
- int32_t idx, unsigned int flags);
-static void local_pin_and_unpin_random_entries(H5F_t * file_ptr, int min_idx,
- int max_idx, int min_count,
- int max_count);
-static void local_pin_random_entry(H5F_t * file_ptr, int min_idx, int max_idx);
-static void local_unpin_all_entries(H5F_t * file_ptr, hbool_t via_unprotect);
-static int local_unpin_next_pinned_entry(H5F_t * file_ptr, int start_idx,
- hbool_t via_unprotect);
-static void lock_and_unlock_random_entries(H5F_t * file_ptr, int min_idx, int max_idx,
- int min_count, int max_count);
-static void lock_and_unlock_random_entry(H5F_t * file_ptr,
- int min_idx, int max_idx);
-static void lock_entry(H5F_t * file_ptr, int32_t idx);
-static void mark_entry_dirty(int32_t idx);
-static void pin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global, hbool_t dirty);
-static void pin_protected_entry(int32_t idx, hbool_t global);
-static void move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx);
+static void expunge_entry(H5F_t *file_ptr, int32_t idx);
+static void insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags);
+static void local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
+ int max_count);
+static void local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
+static void local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect);
+static int local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect);
+static void lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
+ int max_count);
+static void lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
+static void lock_entry(H5F_t *file_ptr, int32_t idx);
+static void mark_entry_dirty(int32_t idx);
+static void pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty);
+static void pin_protected_entry(int32_t idx, hbool_t global);
+static void move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx);
static hbool_t reset_server_counts(void);
-static void resize_entry(int32_t idx, size_t new_size);
-static hbool_t setup_cache_for_test(hid_t * fid_ptr,
- H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
- int metadata_write_strategy);
-static void setup_rand(void);
-static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr);
+static void resize_entry(int32_t idx, size_t new_size);
+static hbool_t setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr,
+ int metadata_write_strategy);
+static void setup_rand(void);
+static hbool_t take_down_cache(hid_t fid, H5C_t *cache_ptr);
static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads);
static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes);
static hbool_t verify_total_reads(int expected_total_reads);
static hbool_t verify_total_writes(unsigned expected_total_writes);
-static void verify_writes(unsigned num_writes, haddr_t * written_entries_tbl);
-static void unlock_entry(H5F_t * file_ptr, int32_t type, unsigned int flags);
-static void unpin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global,
- hbool_t dirty, hbool_t via_unprotect);
-
+static void verify_writes(unsigned num_writes, haddr_t *written_entries_tbl);
+static void unlock_entry(H5F_t *file_ptr, int32_t type, unsigned int flags);
+static void unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect);
/* test functions */
@@ -500,7 +462,6 @@ static hbool_t smoke_check_5(int metadata_write_strategy);
static hbool_t smoke_check_6(int metadata_write_strategy);
static hbool_t trace_file_check(int metadata_write_strategy);
-
/*****************************************************************************/
/****************************** stats functions ******************************/
/*****************************************************************************/
@@ -528,17 +489,12 @@ static hbool_t trace_file_check(int metadata_write_strategy);
static void
print_stats(void)
{
- HDfprintf(stdout,
- "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n",
- world_mpi_rank, datum_clears, datum_pinned_clears,
- datum_destroys );
- HDfprintf(stdout,
- "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n",
- world_mpi_rank, datum_flushes, datum_pinned_flushes,
- datum_loads );
- HDfprintf(stdout,
- "%d: pins: global / global dirty / local = %ld / %ld / %ld\n",
- world_mpi_rank, global_pins, global_dirty_pins, local_pins);
+ HDfprintf(stdout, "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n", world_mpi_rank,
+ datum_clears, datum_pinned_clears, datum_destroys);
+ HDfprintf(stdout, "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n", world_mpi_rank,
+ datum_flushes, datum_pinned_flushes, datum_loads);
+ HDfprintf(stdout, "%d: pins: global / global dirty / local = %ld / %ld / %ld\n", world_mpi_rank,
+ global_pins, global_dirty_pins, local_pins);
HDfflush(stdout);
return;
@@ -546,7 +502,6 @@ print_stats(void)
} /* print_stats() */
#endif /* NOT_USED */
-
/*****************************************************************************
*
* Function: reset_stats()
@@ -566,21 +521,20 @@ print_stats(void)
static void
reset_stats(void)
{
- datum_clears = 0;
- datum_pinned_clears = 0;
- datum_destroys = 0;
- datum_flushes = 0;
- datum_pinned_flushes = 0;
- datum_loads = 0;
+ datum_clears = 0;
+ datum_pinned_clears = 0;
+ datum_destroys = 0;
+ datum_flushes = 0;
+ datum_pinned_flushes = 0;
+ datum_loads = 0;
global_pins = 0;
- global_dirty_pins = 0;
- local_pins = 0;
+ global_dirty_pins = 0;
+ local_pins = 0;
return;
} /* reset_stats() */
-
/*****************************************************************************/
/**************************** MPI setup functions ****************************/
/*****************************************************************************/
@@ -607,132 +561,121 @@ reset_stats(void)
static hbool_t
set_up_file_communicator(void)
{
- hbool_t success = TRUE;
- int mpi_result;
- int num_excluded_ranks;
- int excluded_ranks[1];
+ hbool_t success = TRUE;
+ int mpi_result;
+ int num_excluded_ranks;
+ int excluded_ranks[1];
MPI_Group file_group;
MPI_Group world_group;
- if ( success ) {
+ if (success) {
mpi_result = MPI_Comm_group(world_mpi_comm, &world_group);
- if ( mpi_result != MPI_SUCCESS ) {
+ if (mpi_result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: MPI_Comm_group() failed with error %d.\n",
- world_mpi_rank, FUNC, mpi_result);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, FUNC,
+ mpi_result);
}
}
}
- if ( success ) {
+ if (success) {
num_excluded_ranks = 1;
- excluded_ranks[0] = world_server_mpi_rank;
- mpi_result = MPI_Group_excl(world_group, num_excluded_ranks,
- excluded_ranks, &file_group);
+ excluded_ranks[0] = world_server_mpi_rank;
+ mpi_result = MPI_Group_excl(world_group, num_excluded_ranks, excluded_ranks, &file_group);
- if ( mpi_result != MPI_SUCCESS ) {
+ if (mpi_result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: MPI_Group_excl() failed with error %d.\n",
- world_mpi_rank, FUNC, mpi_result);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, FUNC,
+ mpi_result);
}
}
}
- if ( success ) {
+ if (success) {
- mpi_result = MPI_Comm_create(world_mpi_comm, file_group,
- &file_mpi_comm);
+ mpi_result = MPI_Comm_create(world_mpi_comm, file_group, &file_mpi_comm);
- if ( mpi_result != MPI_SUCCESS ) {
+ if (mpi_result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: MPI_Comm_create() failed with error %d.\n",
- world_mpi_rank, FUNC, mpi_result);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank, FUNC,
+ mpi_result);
}
+ }
+ else {
- } else {
-
- if ( world_mpi_rank != world_server_mpi_rank ) {
+ if (world_mpi_rank != world_server_mpi_rank) {
- if ( file_mpi_comm == MPI_COMM_NULL ) {
+ if (file_mpi_comm == MPI_COMM_NULL) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank, FUNC);
}
}
- } else {
+ }
+ else {
file_mpi_size = world_mpi_size - 1; /* needed by the server */
- if ( file_mpi_comm != MPI_COMM_NULL ) {
+ if (file_mpi_comm != MPI_COMM_NULL) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank, FUNC);
}
}
}
}
}
- if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) {
+ if ((success) && (world_mpi_rank != world_server_mpi_rank)) {
mpi_result = MPI_Comm_size(file_mpi_comm, &file_mpi_size);
- if ( mpi_result != MPI_SUCCESS ) {
+ if (mpi_result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: MPI_Comm_size() failed with error %d.\n",
- world_mpi_rank, FUNC, mpi_result);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, FUNC,
+ mpi_result);
}
}
}
- if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) {
+ if ((success) && (world_mpi_rank != world_server_mpi_rank)) {
mpi_result = MPI_Comm_rank(file_mpi_comm, &file_mpi_rank);
- if ( mpi_result != MPI_SUCCESS ) {
+ if (mpi_result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: MPI_Comm_rank() failed with error %d.\n",
- world_mpi_rank, FUNC, mpi_result);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, FUNC,
+ mpi_result);
}
}
}
- return(success);
+ return (success);
} /* set_up_file_communicator() */
-
/*****************************************************************************/
/******************** data array manipulation functions **********************/
/*****************************************************************************/
@@ -754,36 +697,33 @@ set_up_file_communicator(void)
static int
addr_to_datum_index(haddr_t base_addr)
{
- int top = NUM_DATA_ENTRIES - 1;
- int bottom = 0;
- int middle = (NUM_DATA_ENTRIES - 1) / 2;
+ int top = NUM_DATA_ENTRIES - 1;
+ int bottom = 0;
+ int middle = (NUM_DATA_ENTRIES - 1) / 2;
int ret_value = -1;
- while ( top >= bottom )
- {
- if ( base_addr < data[data_index[middle]].base_addr ) {
+ while (top >= bottom) {
+ if (base_addr < data[data_index[middle]].base_addr) {
- top = middle - 1;
+ top = middle - 1;
middle = (top + bottom) / 2;
-
- } else if ( base_addr > data[data_index[middle]].base_addr ) {
+ }
+ else if (base_addr > data[data_index[middle]].base_addr) {
bottom = middle + 1;
middle = (top + bottom) / 2;
-
- } else /* ( base_addr == data[data_index[middle]].base_addr ) */ {
+ }
+ else /* ( base_addr == data[data_index[middle]].base_addr ) */ {
ret_value = data_index[middle];
- bottom = top + 1; /* to force exit from while loop */
-
+ bottom = top + 1; /* to force exit from while loop */
}
}
- return(ret_value);
+ return (ret_value);
} /* addr_to_datum_index() */
-
/*****************************************************************************
*
* Function: init_data()
@@ -808,21 +748,18 @@ init_data(void)
* At present, I am using the first 20 entries of the Fibonacci
* sequence multiplied by 2. We will see how it works.
*/
- const int num_addr_offsets = 20;
- const haddr_t addr_offsets[20] = { 2, 2, 4, 6, 10,
- 16, 26, 42, 68, 110,
- 178, 288, 466, 754, 1220,
- 1974, 3194, 5168, 8362, 13539};
- int i;
- int j = 0;
- haddr_t addr = BASE_ADDR;
+ const int num_addr_offsets = 20;
+ const haddr_t addr_offsets[20] = {2, 2, 4, 6, 10, 16, 26, 42, 68, 110,
+ 178, 288, 466, 754, 1220, 1974, 3194, 5168, 8362, 13539};
+ int i;
+ int j = 0;
+ haddr_t addr = BASE_ADDR;
/* this must hold so moves don't change entry size. */
- HDassert( (NUM_DATA_ENTRIES / 2) % 20 == 0 );
- HDassert( (virt_num_data_entries / 2) % 20 == 0 );
+ HDassert((NUM_DATA_ENTRIES / 2) % 20 == 0);
+ HDassert((virt_num_data_entries / 2) % 20 == 0);
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
data[i].base_addr = addr;
data[i].len = (size_t)(addr_offsets[j]);
data[i].local_len = (size_t)(addr_offsets[j]);
@@ -830,19 +767,19 @@ init_data(void)
data[i].dirty = FALSE;
data[i].valid = FALSE;
data[i].locked = FALSE;
- data[i].global_pinned = FALSE;
- data[i].local_pinned = FALSE;
- data[i].cleared = FALSE;
+ data[i].global_pinned = FALSE;
+ data[i].local_pinned = FALSE;
+ data[i].cleared = FALSE;
data[i].flushed = FALSE;
data[i].reads = 0;
data[i].writes = 0;
- data[i].index = i;
- data[i].aux_ptr = NULL;
+ data[i].index = i;
+ data[i].aux_ptr = NULL;
- data_index[i] = i;
+ data_index[i] = i;
addr += addr_offsets[j];
- HDassert( addr > data[i].base_addr );
+ HDassert(addr > data[i].base_addr);
j = (j + 1) % num_addr_offsets;
}
@@ -854,7 +791,6 @@ init_data(void)
} /* init_data() */
-
/*****************************************************************************/
/******************** test coordination related functions ********************/
/*****************************************************************************/
@@ -888,28 +824,22 @@ do_express_test(void)
express_test = GetTestExpress();
- result = MPI_Allreduce((void *)&express_test,
- (void *)&max_express_test,
- 1,
- MPI_INT,
- MPI_MAX,
- world_mpi_comm);
+ result =
+ MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, world_mpi_comm);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
max_express_test = -1;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
- world_mpi_rank, FUNC );
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
}
}
- return(max_express_test);
+ return (max_express_test);
} /* do_express_test() */
-
/*****************************************************************************
*
* Function: do_sync()
@@ -933,10 +863,10 @@ do_sync(void)
struct mssg_t mssg;
- if ( nerrors <= 0 ) {
+ if (nerrors <= 0) {
/* compose the message */
- mssg.req = SYNC_REQ_CODE;
+ mssg.req = SYNC_REQ_CODE;
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
@@ -946,43 +876,38 @@ do_sync(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ nerrors++;
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( nerrors <= 0 ) {
+ if (nerrors <= 0) {
- if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
+ if (!recv_mssg(&mssg, SYNC_ACK_CODE)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ }
+ else if ((mssg.req != SYNC_ACK_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n", world_mpi_rank, FUNC);
}
- }
+ }
}
return;
} /* do_sync() */
-
/*****************************************************************************
*
* Function: get_max_nerrors()
@@ -1004,28 +929,21 @@ get_max_nerrors(void)
int max_nerrors;
int result;
- result = MPI_Allreduce((void *)&nerrors,
- (void *)&max_nerrors,
- 1,
- MPI_INT,
- MPI_MAX,
- world_mpi_comm);
+ result = MPI_Allreduce((void *)&nerrors, (void *)&max_nerrors, 1, MPI_INT, MPI_MAX, world_mpi_comm);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
max_nerrors = -1;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
- world_mpi_rank, FUNC );
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
}
}
- return(max_nerrors);
+ return (max_nerrors);
} /* get_max_nerrors() */
-
/*****************************************************************************/
/************************ mssg xfer related functions ************************/
/*****************************************************************************/
@@ -1050,70 +968,63 @@ get_max_nerrors(void)
*
*****************************************************************************/
-#define CACHE_TEST_TAG 99 /* different from any used by the library */
+#define CACHE_TEST_TAG 99 /* different from any used by the library */
static hbool_t
-recv_mssg(struct mssg_t *mssg_ptr,
- int mssg_tag_offset)
+recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
{
- hbool_t success = TRUE;
- int mssg_tag = CACHE_TEST_TAG;
- int result;
+ hbool_t success = TRUE;
+ int mssg_tag = CACHE_TEST_TAG;
+ int result;
MPI_Status status;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_tag_offset < 0 ) ||
- ( mssg_tag_offset> MAX_REQ_CODE ) ) {
+ if ((mssg_ptr == NULL) || (mssg_tag_offset < 0) || (mssg_tag_offset > MAX_REQ_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
mssg_tag += mssg_tag_offset;
}
- if ( success ) {
+ if (success) {
- result = MPI_Recv((void *)mssg_ptr, 1, mpi_mssg_t, MPI_ANY_SOURCE,
- mssg_tag, world_mpi_comm, &status);
+ result = MPI_Recv((void *)mssg_ptr, 1, mpi_mssg_t, MPI_ANY_SOURCE, mssg_tag, world_mpi_comm, &status);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n",
- world_mpi_rank, FUNC );
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( mssg_ptr->magic != MSSG_MAGIC ) {
+ }
+ else if (mssg_ptr->magic != MSSG_MAGIC) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank,
- FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, FUNC);
}
- } else if ( mssg_ptr->src != status.MPI_SOURCE ) {
+ }
+ else if (mssg_ptr->src != status.MPI_SOURCE) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", world_mpi_rank, FUNC);
}
}
}
- return(success);
+ return (success);
} /* recv_mssg() */
-
/*****************************************************************************
*
* Function: send_mssg()
@@ -1136,59 +1047,49 @@ recv_mssg(struct mssg_t *mssg_ptr,
*
*****************************************************************************/
static hbool_t
-send_mssg(struct mssg_t *mssg_ptr,
- hbool_t add_req_to_tag)
+send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
{
- hbool_t success = TRUE;
- int mssg_tag = CACHE_TEST_TAG;
- int result;
+ hbool_t success = TRUE;
+ int mssg_tag = CACHE_TEST_TAG;
+ int result;
static long mssg_num = 0;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->src != world_mpi_rank ) ||
- ( mssg_ptr->dest < 0 ) ||
- ( mssg_ptr->dest == mssg_ptr->src ) ||
- ( mssg_ptr->dest >= world_mpi_size ) ||
- ( mssg_ptr->req < 0 ) ||
- ( mssg_ptr->req > MAX_REQ_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->src != world_mpi_rank) || (mssg_ptr->dest < 0) ||
+ (mssg_ptr->dest == mssg_ptr->src) || (mssg_ptr->dest >= world_mpi_size) || (mssg_ptr->req < 0) ||
+ (mssg_ptr->req > MAX_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
mssg_ptr->mssg_num = mssg_num++;
- if ( add_req_to_tag ) {
+ if (add_req_to_tag) {
- mssg_tag += mssg_ptr->req;
- }
+ mssg_tag += mssg_ptr->req;
+ }
- result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t,
- mssg_ptr->dest, mssg_tag, world_mpi_comm);
+ result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t, mssg_ptr->dest, mssg_tag, world_mpi_comm);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", world_mpi_rank, FUNC);
}
}
}
- return(success);
+ return (success);
} /* send_mssg() */
-
/*****************************************************************************
*
* Function: setup_derived_types()
@@ -1206,78 +1107,72 @@ send_mssg(struct mssg_t *mssg_ptr,
static hbool_t
setup_derived_types(void)
{
- hbool_t success = TRUE;
- int i;
- int result;
- MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG,
- HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT,
- MPI_UNSIGNED, MPI_UNSIGNED};
- int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
- MPI_Aint displs[9];
+ hbool_t success = TRUE;
+ int i;
+ int result;
+ MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, HADDR_AS_MPI_TYPE,
+ MPI_INT, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED};
+ int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
+ MPI_Aint displs[9];
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
+ if ((MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7])) ||
+ (MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]))) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n", world_mpi_rank, FUNC);
}
-
- } else {
+ }
+ else {
/* Now calculate the actual displacements */
- for ( i = 8; i >= 0; --i)
- {
+ for (i = 8; i >= 0; --i) {
displs[i] -= displs[0];
}
}
- if ( success ) {
+ if (success) {
result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
result = MPI_Type_commit(&mpi_mssg_t);
- if ( result != MPI_SUCCESS) {
+ if (result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", world_mpi_rank, FUNC);
}
}
}
- return(success);
+ return (success);
} /* setup_derived_types */
-
/*****************************************************************************
*
* Function: takedown_derived_types()
@@ -1296,25 +1191,23 @@ static hbool_t
takedown_derived_types(void)
{
hbool_t success = TRUE;
- int result;
+ int result;
result = MPI_Type_free(&mpi_mssg_t);
- if ( result != MPI_SUCCESS ) {
+ if (result != MPI_SUCCESS) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", world_mpi_rank, FUNC);
}
}
- return(success);
+ return (success);
} /* takedown_derived_types() */
-
/*****************************************************************************/
/***************************** server functions ******************************/
/*****************************************************************************/
@@ -1337,55 +1230,51 @@ static hbool_t
reset_server_counters(void)
{
hbool_t success = TRUE;
- int i;
- long actual_total_reads = 0;
- long actual_total_writes = 0;
+ int i;
+ long actual_total_reads = 0;
+ long actual_total_writes = 0;
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- if ( data[i].reads > 0 ) {
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ if (data[i].reads > 0) {
actual_total_reads += data[i].reads;
data[i].reads = 0;
}
- if ( data[i].writes > 0 ) {
+ if (data[i].writes > 0) {
actual_total_writes += data[i].writes;
data[i].writes = 0;
}
}
- if ( actual_total_reads != total_reads ) {
+ if (actual_total_reads != total_reads) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n",
- world_mpi_rank, FUNC,
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
actual_total_reads, total_reads);
}
}
- if ( actual_total_writes != total_writes ) {
+ if (actual_total_writes != total_writes) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n",
- world_mpi_rank, FUNC,
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, FUNC,
actual_total_writes, total_writes);
}
}
- total_reads = 0;
+ total_reads = 0;
total_writes = 0;
- return(success);
+ return (success);
} /* reset_server_counters() */
-
/*****************************************************************************
*
* Function: server_main()
@@ -1412,131 +1301,126 @@ reset_server_counters(void)
static hbool_t
server_main(void)
{
- hbool_t done = FALSE;
- hbool_t success = TRUE;
- int done_count = 0;
+ hbool_t done = FALSE;
+ hbool_t success = TRUE;
+ int done_count = 0;
struct mssg_t mssg;
- if ( world_mpi_rank != world_server_mpi_rank ) {
+ if (world_mpi_rank != world_server_mpi_rank) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", world_mpi_rank, FUNC);
}
}
-
- while ( ( success ) && ( ! done ) )
- {
+ while ((success) && (!done)) {
success = recv_mssg(&mssg, 0);
- if ( success ) {
+ if (success) {
- switch ( mssg.req )
- {
- case WRITE_REQ_CODE:
- success = serve_write_request(&mssg);
- break;
+ switch (mssg.req) {
+ case WRITE_REQ_CODE:
+ success = serve_write_request(&mssg);
+ break;
- case WRITE_REQ_ACK_CODE:
+ case WRITE_REQ_ACK_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received write ack?!?.\n", FUNC);
- break;
+ break;
- case READ_REQ_CODE:
+ case READ_REQ_CODE:
success = serve_read_request(&mssg);
- break;
+ break;
- case READ_REQ_REPLY_CODE:
+ case READ_REQ_REPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received read req reply?!?.\n", FUNC);
- break;
+ break;
- case SYNC_REQ_CODE:
+ case SYNC_REQ_CODE:
success = serve_sync_request(&mssg);
- break;
+ break;
- case SYNC_ACK_CODE:
+ case SYNC_ACK_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received sync ack?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_WRITES_CODE:
- success = serve_total_writes_request(&mssg);
- break;
+ case REQ_TTL_WRITES_CODE:
+ success = serve_total_writes_request(&mssg);
+ break;
- case REQ_TTL_WRITES_RPLY_CODE:
+ case REQ_TTL_WRITES_RPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received total writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_READS_CODE:
- success = serve_total_reads_request(&mssg);
- break;
+ case REQ_TTL_READS_CODE:
+ success = serve_total_reads_request(&mssg);
+ break;
- case REQ_TTL_READS_RPLY_CODE:
+ case REQ_TTL_READS_RPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received total reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_WRITES_CODE:
- success = serve_entry_writes_request(&mssg);
- break;
+ case REQ_ENTRY_WRITES_CODE:
+ success = serve_entry_writes_request(&mssg);
+ break;
- case REQ_ENTRY_WRITES_RPLY_CODE:
+ case REQ_ENTRY_WRITES_RPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_READS_CODE:
- success = serve_entry_reads_request(&mssg);
- break;
+ case REQ_ENTRY_READS_CODE:
+ success = serve_entry_reads_request(&mssg);
+ break;
- case REQ_ENTRY_READS_RPLY_CODE:
+ case REQ_ENTRY_READS_RPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_RW_COUNT_RESET_CODE:
- success = serve_rw_count_reset_request(&mssg);
- break;
+ case REQ_RW_COUNT_RESET_CODE:
+ success = serve_rw_count_reset_request(&mssg);
+ break;
- case REQ_RW_COUNT_RESET_RPLY_CODE:
+ case REQ_RW_COUNT_RESET_RPLY_CODE:
success = FALSE;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", FUNC);
- break;
+ break;
- case DONE_REQ_CODE:
- done_count++;
- if(done_count >= file_mpi_size)
- done = TRUE;
- break;
+ case DONE_REQ_CODE:
+ done_count++;
+ if (done_count >= file_mpi_size)
+ done = TRUE;
+ break;
- default:
+ default:
nerrors++;
success = FALSE;
- if(verbose)
- HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
- break;
+ if (verbose)
+ HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
+ break;
}
}
}
- return(success);
+ return (success);
} /* server_main() */
-
/*****************************************************************************
*
* Function: serve_read_request()
@@ -1556,64 +1440,58 @@ server_main(void)
*
*****************************************************************************/
static hbool_t
-serve_read_request(struct mssg_t * mssg_ptr)
+serve_read_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
- int target_index;
- haddr_t target_addr;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
+ int target_index;
+ haddr_t target_addr;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != READ_REQ_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != READ_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
- target_addr = mssg_ptr->base_addr;
+ target_addr = mssg_ptr->base_addr;
target_index = addr_to_datum_index(target_addr);
- if ( target_index < 0 ) {
+ if (target_index < 0) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: addr lookup failed for %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, target_addr);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
- } else if ( data[target_index].len != mssg_ptr->len ) {
+ }
+ else if (data[target_index].len != mssg_ptr->len) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: data[i].len = %zu != mssg->len = %d.\n",
- world_mpi_rank, FUNC,
- data[target_index].len, mssg_ptr->len);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
+ data[target_index].len, mssg_ptr->len);
}
- } else if ( ! (data[target_index].valid) ) {
+ }
+ else if (!(data[target_index].valid)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
+ if (verbose) {
HDfprintf(stdout,
- "%d:%s: proc %d read invalid entry. "
- "idx/base_addr = %d/%" PRIuHADDR ".\n",
- world_mpi_rank, FUNC,
- mssg_ptr->src,
- target_index,
- data[target_index].base_addr);
+ "%d:%s: proc %d read invalid entry. "
+ "idx/base_addr = %d/%" PRIuHADDR ".\n",
+ world_mpi_rank, FUNC, mssg_ptr->src, target_index, data[target_index].base_addr);
}
- } else {
+ }
+ else {
/* compose the reply message */
reply.req = READ_REQ_REPLY_CODE;
@@ -1622,47 +1500,41 @@ serve_read_request(struct mssg_t * mssg_ptr)
reply.mssg_num = -1; /* set by send function */
reply.base_addr = data[target_index].base_addr;
H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t);
- reply.ver = data[target_index].ver;
- reply.count = 0;
- reply.magic = MSSG_MAGIC;
+ reply.ver = data[target_index].ver;
+ reply.count = 0;
+ reply.magic = MSSG_MAGIC;
- /* and update the counters */
- total_reads++;
+ /* and update the counters */
+ total_reads++;
(data[target_index].reads)++;
}
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
+ if (report_mssg) {
- if ( success ) {
+ if (success) {
- HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (int)(data[target_index].len),
+ HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (int)(data[target_index].len),
(int)(data[target_index].ver));
+ }
+ else {
- } else {
-
- HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (int)(data[target_index].len),
+ HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (int)(data[target_index].len),
(int)(data[target_index].ver));
-
}
}
- return(success);
+ return (success);
} /* serve_read_request() */
-
/*****************************************************************************
*
* Function: serve_sync_request()
@@ -1685,25 +1557,22 @@ serve_read_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_sync_request(struct mssg_t * mssg_ptr)
+serve_sync_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != SYNC_REQ_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != SYNC_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
/* compose the reply message */
reply.req = SYNC_ACK_CODE;
@@ -1713,33 +1582,31 @@ serve_sync_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
+ if (report_mssg) {
- if ( success ) {
+ if (success) {
HDfprintf(stdout, "%d sync.\n", (int)(mssg_ptr->src));
-
- } else {
+ }
+ else {
HDfprintf(stdout, "%d sync FAILED.\n", (int)(mssg_ptr->src));
-
}
}
- return(success);
+ return (success);
} /* serve_sync_request() */
-
/*****************************************************************************
*
* Function: serve_write_request()
@@ -1759,81 +1626,75 @@ serve_sync_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_write_request(struct mssg_t * mssg_ptr)
+serve_write_request(struct mssg_t *mssg_ptr)
{
hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
- int target_index;
- int new_ver_num = 0;
+ hbool_t success = TRUE;
+ int target_index;
+ int new_ver_num = 0;
haddr_t target_addr;
#if DO_WRITE_REQ_ACK
struct mssg_t reply;
#endif /* DO_WRITE_REQ_ACK */
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != WRITE_REQ_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != WRITE_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
- target_addr = mssg_ptr->base_addr;
+ target_addr = mssg_ptr->base_addr;
target_index = addr_to_datum_index(target_addr);
- if ( target_index < 0 ) {
+ if (target_index < 0) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: addr lookup failed for %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, target_addr);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
- } else if ( data[target_index].len != mssg_ptr->len ) {
+ }
+ else if (data[target_index].len != mssg_ptr->len) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: data[i].len = %zu != mssg->len = %d.\n",
- world_mpi_rank, FUNC,
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, FUNC,
data[target_index].len, mssg_ptr->len);
}
}
}
- if ( success ) {
+ if (success) {
new_ver_num = mssg_ptr->ver;
/* this check should catch duplicate writes */
- if ( new_ver_num <= data[target_index].ver ) {
+ if (new_ver_num <= data[target_index].ver) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n",
- world_mpi_rank, FUNC,
- new_ver_num, data[target_index].ver);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", world_mpi_rank, FUNC, new_ver_num,
+ data[target_index].ver);
}
}
}
- if ( success ) {
+ if (success) {
- /* process the write */
- data[target_index].ver = new_ver_num;
+ /* process the write */
+ data[target_index].ver = new_ver_num;
data[target_index].valid = TRUE;
/* and update the counters */
- total_writes++;
+ total_writes++;
(data[target_index].writes)++;
#if DO_WRITE_REQ_ACK
@@ -1845,43 +1706,36 @@ serve_write_request(struct mssg_t * mssg_ptr)
reply.mssg_num = -1; /* set by send function */
reply.base_addr = data[target_index].base_addr;
H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t);
- reply.ver = data[target_index].ver;
- reply.count = 0;
- reply.magic = MSSG_MAGIC;
+ reply.ver = data[target_index].ver;
+ reply.count = 0;
+ reply.magic = MSSG_MAGIC;
- /* and send it */
+ /* and send it */
success = send_mssg(&reply, TRUE);
#endif /* DO_WRITE_REQ_ACK */
-
}
- if ( report_mssg ) {
+ if (report_mssg) {
- if ( success ) {
+ if (success) {
- HDfprintf(stdout, "%d write 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (int)(data[target_index].len),
+ HDfprintf(stdout, "%d write 0x%llx. len = %d. ver = %d.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (int)(data[target_index].len),
(int)(data[target_index].ver));
+ }
+ else {
- } else {
-
- HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (int)(data[target_index].len),
+ HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (int)(data[target_index].len),
(int)(data[target_index].ver));
-
}
}
- return(success);
+ return (success);
} /* serve_write_request() */
-
/*****************************************************************************
*
* Function: serve_total_writes_request()
@@ -1902,25 +1756,22 @@ serve_write_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_total_writes_request(struct mssg_t * mssg_ptr)
+serve_total_writes_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != REQ_TTL_WRITES_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
/* compose the reply message */
reply.req = REQ_TTL_WRITES_RPLY_CODE;
@@ -1934,33 +1785,27 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
reply.magic = MSSG_MAGIC;
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
-
- if ( success ) {
-
- HDfprintf(stdout, "%d request total writes %d.\n",
- (int)(mssg_ptr->src),
- total_writes);
+ if (report_mssg) {
- } else {
+ if (success) {
- HDfprintf(stdout, "%d request total writes %d -- FAILED.\n",
- (int)(mssg_ptr->src),
- total_writes);
+ HDfprintf(stdout, "%d request total writes %d.\n", (int)(mssg_ptr->src), total_writes);
+ }
+ else {
+ HDfprintf(stdout, "%d request total writes %d -- FAILED.\n", (int)(mssg_ptr->src), total_writes);
}
}
- return(success);
+ return (success);
} /* serve_total_writes_request() */
-
/*****************************************************************************
*
* Function: serve_total_reads_request()
@@ -1981,25 +1826,22 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_total_reads_request(struct mssg_t * mssg_ptr)
+serve_total_reads_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != REQ_TTL_READS_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
/* compose the reply message */
reply.req = REQ_TTL_READS_RPLY_CODE;
@@ -2013,33 +1855,27 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
reply.magic = MSSG_MAGIC;
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
-
- if ( success ) {
-
- HDfprintf(stdout, "%d request total reads %d.\n",
- (int)(mssg_ptr->src),
- total_reads);
+ if (report_mssg) {
- } else {
+ if (success) {
- HDfprintf(stdout, "%d request total reads %d -- FAILED.\n",
- (int)(mssg_ptr->src),
- total_reads);
+ HDfprintf(stdout, "%d request total reads %d.\n", (int)(mssg_ptr->src), total_reads);
+ }
+ else {
+ HDfprintf(stdout, "%d request total reads %d -- FAILED.\n", (int)(mssg_ptr->src), total_reads);
}
}
- return(success);
+ return (success);
} /* serve_total_reads_request() */
-
/*****************************************************************************
*
* Function: serve_entry_writes_request()
@@ -2060,41 +1896,38 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_entry_writes_request(struct mssg_t * mssg_ptr)
+serve_entry_writes_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
- int target_index;
- haddr_t target_addr;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
+ int target_index;
+ haddr_t target_addr;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != REQ_ENTRY_WRITES_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
- target_addr = mssg_ptr->base_addr;
+ target_addr = mssg_ptr->base_addr;
target_index = addr_to_datum_index(target_addr);
- if ( target_index < 0 ) {
+ if (target_index < 0) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: addr lookup failed for %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, target_addr);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
- } else {
+ }
+ else {
/* compose the reply message */
reply.req = REQ_ENTRY_WRITES_RPLY_CODE;
@@ -2109,35 +1942,29 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
}
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
+ if (report_mssg) {
- if ( success ) {
+ if (success) {
- HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (long)(data[target_index].writes));
-
- } else {
-
- HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (long)(data[target_index].writes));
+ HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (long)(data[target_index].writes));
+ }
+ else {
+ HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (long)(data[target_index].writes));
}
}
- return(success);
+ return (success);
} /* serve_entry_writes_request() */
-
/*****************************************************************************
*
* Function: serve_entry_reads_request()
@@ -2158,41 +1985,38 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_entry_reads_request(struct mssg_t * mssg_ptr)
+serve_entry_reads_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
- int target_index;
- haddr_t target_addr;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
+ int target_index;
+ haddr_t target_addr;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != REQ_ENTRY_READS_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
- target_addr = mssg_ptr->base_addr;
+ target_addr = mssg_ptr->base_addr;
target_index = addr_to_datum_index(target_addr);
- if ( target_index < 0 ) {
+ if (target_index < 0) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: addr lookup failed for %" PRIuHADDR ".\n",
- world_mpi_rank, FUNC, target_addr);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, FUNC,
+ target_addr);
}
- } else {
+ }
+ else {
/* compose the reply message */
reply.req = REQ_ENTRY_READS_RPLY_CODE;
@@ -2207,35 +2031,29 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
}
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
-
- if ( success ) {
-
- HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (long)(data[target_index].reads));
+ if (report_mssg) {
- } else {
+ if (success) {
- HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n",
- (int)(mssg_ptr->src),
- (long long)(data[target_index].base_addr),
- (long)(data[target_index].reads));
+ HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (long)(data[target_index].reads));
+ }
+ else {
+ HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n", (int)(mssg_ptr->src),
+ (long long)(data[target_index].base_addr), (long)(data[target_index].reads));
}
}
- return(success);
+ return (success);
} /* serve_entry_reads_request() */
-
/*****************************************************************************
*
* Function: serve_rw_count_reset_request()
@@ -2255,30 +2073,27 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
*
*****************************************************************************/
static hbool_t
-serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
+serve_rw_count_reset_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ hbool_t report_mssg = FALSE;
+ hbool_t success = TRUE;
struct mssg_t reply;
- if ( ( mssg_ptr == NULL ) ||
- ( mssg_ptr->req != REQ_RW_COUNT_RESET_CODE ) ||
- ( mssg_ptr->magic != MSSG_MAGIC ) ) {
+ if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_RW_COUNT_RESET_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, FUNC);
}
}
- if ( success ) {
+ if (success) {
success = reset_server_counters();
}
- if ( success ) {
+ if (success) {
/* compose the reply message */
reply.req = REQ_RW_COUNT_RESET_RPLY_CODE;
@@ -2292,36 +2107,31 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
reply.magic = MSSG_MAGIC;
}
- if ( success ) {
+ if (success) {
success = send_mssg(&reply, TRUE);
}
- if ( report_mssg ) {
-
- if ( success ) {
+ if (report_mssg) {
- HDfprintf(stdout, "%d request R/W counter reset.\n",
- (int)(mssg_ptr->src));
+ if (success) {
- } else {
-
- HDfprintf(stdout, "%d request R/w counter reset FAILED.\n",
- (int)(mssg_ptr->src));
+ HDfprintf(stdout, "%d request R/W counter reset.\n", (int)(mssg_ptr->src));
+ }
+ else {
+ HDfprintf(stdout, "%d request R/w counter reset FAILED.\n", (int)(mssg_ptr->src));
}
}
- return(success);
+ return (success);
} /* serve_rw_count_reset_request() */
-
/*****************************************************************************/
/**************************** Call back functions ****************************/
/*****************************************************************************/
-
/*-------------------------------------------------------------------------
* Function: datum_get_initial_load_size
*
@@ -2337,40 +2147,38 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
static herr_t
datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
{
- haddr_t addr = *(haddr_t *)udata_ptr;
- int idx;
- struct datum * entry_ptr;
+ haddr_t addr = *(haddr_t *)udata_ptr;
+ int idx;
+ struct datum *entry_ptr;
- HDassert( udata_ptr );
- HDassert( image_len_ptr );
+ HDassert(udata_ptr);
+ HDassert(image_len_ptr);
idx = addr_to_datum_index(addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( addr == entry_ptr->base_addr );
- HDassert( ! entry_ptr->global_pinned );
- HDassert( ! entry_ptr->local_pinned );
+ HDassert(addr == entry_ptr->base_addr);
+ HDassert(!entry_ptr->global_pinned);
+ HDassert(!entry_ptr->local_pinned);
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)addr, (int)entry_ptr->local_len);
- fflush(stdout);
+ HDfprintf(stdout, "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank,
+ idx, (long)addr, (int)entry_ptr->local_len);
+ fflush(stdout);
}
/* Set image length size */
*image_len_ptr = entry_ptr->local_len;
- return(SUCCEED);
+ return (SUCCEED);
} /* get_initial_load_size() */
-
/*-------------------------------------------------------------------------
* Function: datum_deserialize
*
@@ -2384,54 +2192,48 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
*-------------------------------------------------------------------------
*/
static void *
-datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr,
- H5_ATTR_UNUSED size_t len,
- void * udata_ptr,
- hbool_t * dirty_ptr)
+datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr, H5_ATTR_UNUSED size_t len, void *udata_ptr,
+ hbool_t *dirty_ptr)
{
- haddr_t addr = *(haddr_t *)udata_ptr;
- hbool_t success = TRUE;
- int idx;
- struct datum * entry_ptr = NULL;
+ haddr_t addr = *(haddr_t *)udata_ptr;
+ hbool_t success = TRUE;
+ int idx;
+ struct datum *entry_ptr = NULL;
- HDassert( image_ptr != NULL );
+ HDassert(image_ptr != NULL);
idx = addr_to_datum_index(addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( addr == entry_ptr->base_addr );
- HDassert( ! entry_ptr->global_pinned );
- HDassert( ! entry_ptr->local_pinned );
+ HDassert(addr == entry_ptr->base_addr);
+ HDassert(!entry_ptr->global_pinned);
+ HDassert(!entry_ptr->local_pinned);
- HDassert( dirty_ptr );
+ HDassert(dirty_ptr);
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
- world_mpi_rank, idx, (long)addr, (int)len,
- (int)(entry_ptr->header.is_dirty));
- fflush(stdout);
+ HDfprintf(stdout, "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len, (int)(entry_ptr->header.is_dirty));
+ fflush(stdout);
}
*dirty_ptr = FALSE;
- if ( ! success ) {
+ if (!success) {
entry_ptr = NULL;
-
}
- return(entry_ptr);
+ return (entry_ptr);
} /* deserialize() */
-
/*-------------------------------------------------------------------------
* Function: datum_image_len
*
@@ -2450,39 +2252,36 @@ datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr,
static herr_t
datum_image_len(const void *thing, size_t *image_len)
{
- int idx;
- struct datum * entry_ptr;
+ int idx;
+ struct datum *entry_ptr;
- HDassert( thing );
- HDassert( image_len );
+ HDassert(thing);
+ HDassert(image_len);
entry_ptr = (struct datum *)thing;
idx = addr_to_datum_index(entry_ptr->base_addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
- HDassert( &(data[idx]) == entry_ptr );
- HDassert( entry_ptr->local_len > 0 );
- HDassert( entry_ptr->local_len <= entry_ptr->len );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
+ HDassert(&(data[idx]) == entry_ptr);
+ HDassert(entry_ptr->local_len > 0);
+ HDassert(entry_ptr->local_len <= entry_ptr->len);
- if(callbacks_verbose) {
- HDfprintf(stdout,
- "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)(entry_ptr->base_addr),
- (int)(entry_ptr->local_len));
- fflush(stdout);
+ if (callbacks_verbose) {
+ HDfprintf(stdout, "%d: image_len() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank, idx,
+ (long)(entry_ptr->base_addr), (int)(entry_ptr->local_len));
+ fflush(stdout);
}
- HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+ HDassert(entry_ptr->header.addr == entry_ptr->base_addr);
*image_len = entry_ptr->local_len;
- return(SUCCEED);
+ return (SUCCEED);
} /* datum_image_len() */
-
/*-------------------------------------------------------------------------
* Function: datum_serialize
*
@@ -2496,68 +2295,62 @@ datum_image_len(const void *thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-datum_serialize(const H5F_t *f,
- void H5_ATTR_NDEBUG_UNUSED *image_ptr,
- size_t len,
- void *thing_ptr)
+datum_serialize(const H5F_t *f, void H5_ATTR_NDEBUG_UNUSED *image_ptr, size_t len, void *thing_ptr)
{
- herr_t ret_value = SUCCEED;
- int idx;
- struct datum * entry_ptr;
- struct H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED;
+ int idx;
+ struct datum * entry_ptr;
+ struct H5AC_aux_t *aux_ptr;
- HDassert( thing_ptr );
- HDassert( image_ptr );
+ HDassert(thing_ptr);
+ HDassert(image_ptr);
entry_ptr = (struct datum *)thing_ptr;
- HDassert( f );
- HDassert( f->shared );
- HDassert( f->shared->cache );
- HDassert( f->shared->cache->magic == H5C__H5C_T_MAGIC );
- HDassert( f->shared->cache->aux_ptr );
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(f->shared->cache->magic == H5C__H5C_T_MAGIC);
+ HDassert(f->shared->cache->aux_ptr);
aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr);
- HDassert( aux_ptr );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
entry_ptr->aux_ptr = aux_ptr;
idx = addr_to_datum_index(entry_ptr->base_addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
- HDassert( &(data[idx]) == entry_ptr );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
+ HDassert(&(data[idx]) == entry_ptr);
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr, (int)len);
- fflush(stdout);
+ HDfprintf(stdout, "%d: serialize() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank, idx,
+ (long)entry_ptr->header.addr, (int)len);
+ fflush(stdout);
}
- HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
+ HDassert(entry_ptr->header.addr == entry_ptr->base_addr);
+ HDassert((entry_ptr->header.size == entry_ptr->len) || (entry_ptr->header.size == entry_ptr->local_len));
- HDassert( entry_ptr->header.is_dirty == entry_ptr->dirty );
+ HDassert(entry_ptr->header.is_dirty == entry_ptr->dirty);
datum_flushes++;
- if ( entry_ptr->header.is_pinned ) {
+ if (entry_ptr->header.is_pinned) {
datum_pinned_flushes++;
- HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned );
+ HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned);
}
- return(ret_value);
+ return (ret_value);
} /* datum_serialize() */
-
/*-------------------------------------------------------------------------
* Function: datum_notify
*
@@ -2574,61 +2367,56 @@ datum_serialize(const H5F_t *f,
static herr_t
datum_notify(H5C_notify_action_t action, void *thing)
{
- hbool_t was_dirty = FALSE;
- herr_t ret_value = SUCCEED;
- struct datum * entry_ptr;
- struct H5AC_aux_t * aux_ptr;
- struct mssg_t mssg;
- int idx;
+ hbool_t was_dirty = FALSE;
+ herr_t ret_value = SUCCEED;
+ struct datum * entry_ptr;
+ struct H5AC_aux_t *aux_ptr;
+ struct mssg_t mssg;
+ int idx;
- HDassert( thing );
+ HDassert(thing);
entry_ptr = (struct datum *)thing;
idx = addr_to_datum_index(entry_ptr->base_addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
- HDassert( &(data[idx]) == entry_ptr );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
+ HDassert(&(data[idx]) == entry_ptr);
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = %d, idx = %d, addr = %ld.\n",
- world_mpi_rank, (int) action, idx,
- (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = %d, idx = %d, addr = %ld.\n", world_mpi_rank, (int)action,
+ idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
- HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
+ HDassert(entry_ptr->header.addr == entry_ptr->base_addr);
/* Skip this check when the entry is being dirtied, since the resize
* operation sends the message before the len/local_len is updated
* (after the resize operation completes successfully) (QAK - 2016/10/19)
*/
- if(H5AC_NOTIFY_ACTION_ENTRY_DIRTIED != action)
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
+ if (H5AC_NOTIFY_ACTION_ENTRY_DIRTIED != action)
+ HDassert((entry_ptr->header.size == entry_ptr->len) ||
+ (entry_ptr->header.size == entry_ptr->local_len));
- switch ( action )
- {
+ switch (action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = insert, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = insert, idx = %d, addr = %ld.\n", world_mpi_rank,
+ idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
/* do nothing */
break;
case H5AC_NOTIFY_ACTION_AFTER_LOAD:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = load, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = load, idx = %d, addr = %ld.\n", world_mpi_rank, idx,
+ (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2639,52 +2427,45 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = entry_ptr->base_addr;
H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t);
- mssg.ver = 0; /* bogus -- should be corrected by server */
- mssg.count = 0; /* not used */
- mssg.magic = MSSG_MAGIC;
+ mssg.ver = 0; /* bogus -- should be corrected by server */
+ mssg.count = 0; /* not used */
+ mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
- if ( ret_value == SUCCEED ) {
+ if (ret_value == SUCCEED) {
- if ( ! recv_mssg(&mssg, READ_REQ_REPLY_CODE) ) {
+ if (!recv_mssg(&mssg, READ_REQ_REPLY_CODE)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( ret_value == SUCCEED ) {
+ if (ret_value == SUCCEED) {
- if ( ( mssg.req != READ_REQ_REPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != entry_ptr->base_addr ) ||
- ( mssg.len != entry_ptr->len ) ||
- ( mssg.ver < entry_ptr->ver ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ if ((mssg.req != READ_REQ_REPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != entry_ptr->base_addr) ||
+ (mssg.len != entry_ptr->len) || (mssg.ver < entry_ptr->ver) ||
+ (mssg.magic != MSSG_MAGIC)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Bad data in read req reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, FUNC);
}
-#if 0 /* This has been useful debugging code -- keep it for now. */
+#if 0 /* This has been useful debugging code -- keep it for now. */
if ( mssg.req != READ_REQ_REPLY_CODE ) {
HDfprintf(stdout,
@@ -2743,48 +2524,44 @@ datum_notify(H5C_notify_action_t action, void *thing)
world_mpi_rank, FUNC);
}
#endif /* JRM */
+ }
+ else {
- } else {
-
- entry_ptr->ver = mssg.ver;
+ entry_ptr->ver = mssg.ver;
entry_ptr->dirty = FALSE;
datum_loads++;
}
}
break;
- case H5C_NOTIFY_ACTION_AFTER_FLUSH:
- if ( callbacks_verbose ) {
+ case H5C_NOTIFY_ACTION_AFTER_FLUSH:
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = flush, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = flush, idx = %d, addr = %ld.\n", world_mpi_rank, idx,
+ (long)entry_ptr->header.addr);
fflush(stdout);
}
- HDassert( entry_ptr->aux_ptr );
- HDassert( entry_ptr->aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- aux_ptr = entry_ptr->aux_ptr;
+ HDassert(entry_ptr->aux_ptr);
+ HDassert(entry_ptr->aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ aux_ptr = entry_ptr->aux_ptr;
entry_ptr->aux_ptr = NULL;
- HDassert(entry_ptr->header.is_dirty); /* JRM */
+ HDassert(entry_ptr->header.is_dirty); /* JRM */
- if ( ( file_mpi_rank != 0 ) &&
- ( entry_ptr->dirty ) &&
- ( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY ) ) {
+ if ((file_mpi_rank != 0) && (entry_ptr->dirty) &&
+ (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY)) {
ret_value = FAIL;
- HDfprintf(stdout,
- "%d:%s: Flushed dirty entry from non-zero file process.",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Flushed dirty entry from non-zero file process.", world_mpi_rank,
+ FUNC);
}
- if ( ret_value == SUCCEED ) {
+ if (ret_value == SUCCEED) {
- if ( entry_ptr->header.is_dirty ) {
+ if (entry_ptr->header.is_dirty) {
- was_dirty = TRUE; /* so we will receive the ack
+ was_dirty = TRUE; /* so we will receive the ack
* if requested
*/
@@ -2795,53 +2572,46 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = entry_ptr->base_addr;
H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t);
- mssg.ver = entry_ptr->ver;
- mssg.count = 0;
- mssg.magic = MSSG_MAGIC;
+ mssg.ver = entry_ptr->ver;
+ mssg.count = 0;
+ mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
- else
- {
- entry_ptr->dirty = FALSE;
- entry_ptr->flushed = TRUE;
+ else {
+ entry_ptr->dirty = FALSE;
+ entry_ptr->flushed = TRUE;
}
}
}
#if DO_WRITE_REQ_ACK
- if ( ( ret_value == SUCCEED ) && ( was_dirty ) ) {
+ if ((ret_value == SUCCEED) && (was_dirty)) {
- if ( ! recv_mssg(&mssg, WRITE_REQ_ACK_CODE) ) {
+ if (!recv_mssg(&mssg, WRITE_REQ_ACK_CODE)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != WRITE_REQ_ACK_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != entry_ptr->base_addr ) ||
- ( mssg.len != entry_ptr->len ) ||
- ( mssg.ver != entry_ptr->ver ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ }
+ else if ((mssg.req != WRITE_REQ_ACK_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != entry_ptr->base_addr) ||
+ (mssg.len != entry_ptr->len) || (mssg.ver != entry_ptr->ver) ||
+ (mssg.magic != MSSG_MAGIC)) {
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Bad data in write req ack.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, FUNC);
}
}
}
@@ -2850,19 +2620,18 @@ datum_notify(H5C_notify_action_t action, void *thing)
datum_flushes++;
- if ( entry_ptr->header.is_pinned ) {
+ if (entry_ptr->header.is_pinned) {
datum_pinned_flushes++;
HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned);
}
- break;
+ break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = evict, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = evict, idx = %d, addr = %ld.\n", world_mpi_rank, idx,
+ (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2870,11 +2639,10 @@ datum_notify(H5C_notify_action_t action, void *thing)
break;
case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = entry dirty, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = entry dirty, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2882,32 +2650,30 @@ datum_notify(H5C_notify_action_t action, void *thing)
break;
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = entry clean, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = entry clean, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
entry_ptr->cleared = TRUE;
- entry_ptr->dirty = FALSE;
+ entry_ptr->dirty = FALSE;
datum_clears++;
- if(entry_ptr->header.is_pinned) {
+ if (entry_ptr->header.is_pinned) {
datum_pinned_clears++;
- HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned );
+ HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned);
} /* end if */
break;
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = child entry dirty, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = child entry dirty, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2915,11 +2681,10 @@ datum_notify(H5C_notify_action_t action, void *thing)
break;
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = child entry clean, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = child entry clean, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2927,11 +2692,10 @@ datum_notify(H5C_notify_action_t action, void *thing)
break;
case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = child entry unserialized, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = child entry unserialized, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2939,32 +2703,29 @@ datum_notify(H5C_notify_action_t action, void *thing)
break;
case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: notify() action = child entry serialized, idx = %d, addr = %ld.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ HDfprintf(stdout, "%d: notify() action = child entry serialized, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
fflush(stdout);
}
/* do nothing */
break;
- default:
+ default:
nerrors++;
ret_value = FAIL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Unknown notify action.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Unknown notify action.\n", world_mpi_rank, FUNC);
}
- break;
+ break;
}
- return(ret_value);
+ return (ret_value);
} /* datum_notify() */
-
/*-------------------------------------------------------------------------
* Function: datum_free_icr
*
@@ -2982,45 +2743,42 @@ datum_notify(H5C_notify_action_t action, void *thing)
*-------------------------------------------------------------------------
*/
static herr_t
-datum_free_icr(void * thing)
+datum_free_icr(void *thing)
{
- int idx;
- struct datum * entry_ptr;
+ int idx;
+ struct datum *entry_ptr;
- HDassert( thing );
+ HDassert(thing);
entry_ptr = (struct datum *)thing;
idx = addr_to_datum_index(entry_ptr->base_addr);
- HDassert( idx >= 0 );
- HDassert( idx < NUM_DATA_ENTRIES );
- HDassert( idx < virt_num_data_entries );
- HDassert( &(data[idx]) == entry_ptr );
+ HDassert(idx >= 0);
+ HDassert(idx < NUM_DATA_ENTRIES);
+ HDassert(idx < virt_num_data_entries);
+ HDassert(&(data[idx]) == entry_ptr);
- if ( callbacks_verbose ) {
+ if (callbacks_verbose) {
- HDfprintf(stdout,
- "%d: free_icr() idx = %d, dirty = %d.\n",
- world_mpi_rank, idx, (int)(entry_ptr->dirty));
- fflush(stdout);
+ HDfprintf(stdout, "%d: free_icr() idx = %d, dirty = %d.\n", world_mpi_rank, idx,
+ (int)(entry_ptr->dirty));
+ fflush(stdout);
}
- HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
+ HDassert(entry_ptr->header.addr == entry_ptr->base_addr);
+ HDassert((entry_ptr->header.size == entry_ptr->len) || (entry_ptr->header.size == entry_ptr->local_len));
- HDassert( !(entry_ptr->header.is_dirty) );
- HDassert( !(entry_ptr->global_pinned) );
- HDassert( !(entry_ptr->local_pinned) );
- HDassert( !(entry_ptr->header.is_pinned) );
+ HDassert(!(entry_ptr->header.is_dirty));
+ HDassert(!(entry_ptr->global_pinned));
+ HDassert(!(entry_ptr->local_pinned));
+ HDassert(!(entry_ptr->header.is_pinned));
datum_destroys++;
- return(SUCCEED);
+ return (SUCCEED);
} /* datum_free_icr() */
-
/*****************************************************************************/
/************************** test utility functions ***************************/
/*****************************************************************************/
@@ -3040,57 +2798,54 @@ datum_free_icr(void * thing)
*
*****************************************************************************/
static void
-expunge_entry(H5F_t * file_ptr,
- int32_t idx)
+expunge_entry(H5F_t *file_ptr, int32_t idx)
{
- hbool_t in_cache;
- herr_t result;
- struct datum * entry_ptr;
+ hbool_t in_cache;
+ herr_t result;
+ struct datum *entry_ptr;
- HDassert( file_ptr );
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( !(entry_ptr->locked) );
- HDassert( !(entry_ptr->global_pinned) );
- HDassert( !(entry_ptr->local_pinned) );
+ HDassert(!(entry_ptr->locked));
+ HDassert(!(entry_ptr->global_pinned));
+ HDassert(!(entry_ptr->local_pinned));
entry_ptr->dirty = FALSE;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
result = H5AC_expunge_entry(file_ptr, &(types[0]), entry_ptr->header.addr, H5AC__NO_FLAGS_SET);
- if ( result < 0 ) {
+ if (result < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", world_mpi_rank, FUNC);
}
}
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( ! ((entry_ptr->header).is_dirty) );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
+ HDassert(!((entry_ptr->header).is_dirty));
- result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr, NULL, &in_cache, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL);
- if ( result < 0 ) {
+ if (result < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", world_mpi_rank, FUNC);
}
- } else if ( in_cache ) {
+ }
+ else if (in_cache) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", world_mpi_rank, FUNC);
}
}
}
@@ -3099,7 +2854,6 @@ expunge_entry(H5F_t * file_ptr,
} /* expunge_entry() */
-
/*****************************************************************************
* Function: insert_entry()
*
@@ -3122,93 +2876,83 @@ expunge_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-insert_entry(H5C_t * cache_ptr,
- H5F_t * file_ptr,
- int32_t idx,
- unsigned int flags)
+insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
{
- hbool_t insert_pinned;
- herr_t result;
- struct datum * entry_ptr;
+ hbool_t insert_pinned;
+ herr_t result;
+ struct datum *entry_ptr;
- HDassert( cache_ptr );
- HDassert( file_ptr );
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert(cache_ptr);
+ HDassert(file_ptr);
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( !(entry_ptr->locked) );
+ HDassert(!(entry_ptr->locked));
- insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0 );
+ insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
- result = H5AC_insert_entry(file_ptr, &(types[0]),
- entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
+ result = H5AC_insert_entry(file_ptr, &(types[0]), entry_ptr->base_addr,
+ (void *)(&(entry_ptr->header)), flags);
- if ( ( result < 0 ) ||
- ( entry_ptr->header.type != &(types[0]) ) ||
- ( entry_ptr->len != entry_ptr->header.size ) ||
- ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
+ if ((result < 0) || (entry_ptr->header.type != &(types[0])) ||
+ (entry_ptr->len != entry_ptr->header.size) || (entry_ptr->base_addr != entry_ptr->header.addr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", world_mpi_rank, FUNC);
}
}
- if ( ! (entry_ptr->header.is_dirty) ) {
+ if (!(entry_ptr->header.is_dirty)) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t *aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) &&
+ (aux_ptr->dirty_bytes == 0))) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, idx,
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, FUNC, idx,
(int)(data[idx].header.is_dirty));
- }
+ }
}
}
- if ( insert_pinned ) {
+ if (insert_pinned) {
- HDassert( entry_ptr->header.is_pinned );
+ HDassert(entry_ptr->header.is_pinned);
entry_ptr->global_pinned = TRUE;
- global_pins++;
-
- } else {
+ global_pins++;
+ }
+ else {
- HDassert( ! ( entry_ptr->header.is_pinned ) );
+ HDassert(!(entry_ptr->header.is_pinned));
entry_ptr->global_pinned = FALSE;
-
}
/* HDassert( entry_ptr->header.is_dirty ); */
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
}
return;
} /* insert_entry() */
-
/*****************************************************************************
* Function: local_pin_and_unpin_random_entries()
*
@@ -3224,59 +2968,52 @@ insert_entry(H5C_t * cache_ptr,
*
*****************************************************************************/
static void
-local_pin_and_unpin_random_entries(H5F_t * file_ptr,
- int min_idx,
- int max_idx,
- int min_count,
- int max_count)
+local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, int max_count)
{
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
hbool_t via_unprotect;
- int count;
- int i;
- int idx;
-
- HDassert( file_ptr );
- HDassert( 0 <= min_idx );
- HDassert( min_idx < max_idx );
- HDassert( max_idx < NUM_DATA_ENTRIES );
- HDassert( max_idx < virt_num_data_entries );
- HDassert( 0 <= min_count );
- HDassert( min_count < max_count );
+ int count;
+ int i;
+ int idx;
+
+ HDassert(file_ptr);
+ HDassert(0 <= min_idx);
+ HDassert(min_idx < max_idx);
+ HDassert(max_idx < NUM_DATA_ENTRIES);
+ HDassert(max_idx < virt_num_data_entries);
+ HDassert(0 <= min_count);
+ HDassert(min_count < max_count);
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert(min_count <= count);
+ HDassert(count <= max_count);
- for ( i = 0; i < count; i++ )
- {
+ for (i = 0; i < count; i++) {
local_pin_random_entry(file_ptr, min_idx, max_idx);
- }
+ }
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert(min_count <= count);
+ HDassert(count <= max_count);
- i = 0;
- idx = 0;
+ i = 0;
+ idx = 0;
- while ( ( i < count ) && ( idx >= 0 ) )
- {
- via_unprotect = ( (((unsigned)i) & 0x0001) == 0 );
- idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
- i++;
- }
+ while ((i < count) && (idx >= 0)) {
+ via_unprotect = ((((unsigned)i) & 0x0001) == 0);
+ idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
+ i++;
+ }
}
return;
} /* local_pin_and_unpin_random_entries() */
-
/*****************************************************************************
* Function: local_pin_random_entry()
*
@@ -3294,27 +3031,23 @@ local_pin_and_unpin_random_entries(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-local_pin_random_entry(H5F_t * file_ptr,
- int min_idx,
- int max_idx)
+local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx)
{
int idx;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( 0 <= min_idx );
- HDassert( min_idx < max_idx );
- HDassert( max_idx < NUM_DATA_ENTRIES );
- HDassert( max_idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert(0 <= min_idx);
+ HDassert(min_idx < max_idx);
+ HDassert(max_idx < NUM_DATA_ENTRIES);
+ HDassert(max_idx < virt_num_data_entries);
- do
- {
+ do {
idx = (HDrand() % (max_idx - min_idx)) + min_idx;
- HDassert( min_idx <= idx );
- HDassert( idx <= max_idx );
- }
- while ( data[idx].global_pinned || data[idx].local_pinned );
+ HDassert(min_idx <= idx);
+ HDassert(idx <= max_idx);
+ } while (data[idx].global_pinned || data[idx].local_pinned);
pin_entry(file_ptr, idx, FALSE, FALSE);
}
@@ -3323,7 +3056,6 @@ local_pin_random_entry(H5F_t * file_ptr,
} /* local_pin_random_entry() */
-
/*****************************************************************************
* Function: local_unpin_all_entries()
*
@@ -3338,30 +3070,26 @@ local_pin_random_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-local_unpin_all_entries(H5F_t * file_ptr,
- hbool_t via_unprotect)
+local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect)
{
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
int idx;
- HDassert( file_ptr );
+ HDassert(file_ptr);
- idx = 0;
+ idx = 0;
- while ( idx >= 0 )
- {
- idx = local_unpin_next_pinned_entry(file_ptr,
- idx, via_unprotect);
- }
+ while (idx >= 0) {
+ idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
+ }
}
return;
} /* local_unpin_all_entries() */
-
/*****************************************************************************
* Function: local_unpin_next_pinned_entry()
*
@@ -3379,47 +3107,42 @@ local_unpin_all_entries(H5F_t * file_ptr,
*
*****************************************************************************/
static int
-local_unpin_next_pinned_entry(H5F_t * file_ptr,
- int start_idx,
- hbool_t via_unprotect)
+local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect)
{
- int i = 0;
+ int i = 0;
int idx = -1;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( 0 <= start_idx );
- HDassert( start_idx < NUM_DATA_ENTRIES );
- HDassert( start_idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert(0 <= start_idx);
+ HDassert(start_idx < NUM_DATA_ENTRIES);
+ HDassert(start_idx < virt_num_data_entries);
- idx = start_idx;
+ idx = start_idx;
- while ( ( i < virt_num_data_entries ) &&
- ( ! ( data[idx].local_pinned ) ) )
- {
- i++;
- idx++;
- if ( idx >= virt_num_data_entries ) {
- idx = 0;
+ while ((i < virt_num_data_entries) && (!(data[idx].local_pinned))) {
+ i++;
+ idx++;
+ if (idx >= virt_num_data_entries) {
+ idx = 0;
+ }
}
- }
-
- if ( data[idx].local_pinned ) {
- unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
+ if (data[idx].local_pinned) {
- } else {
+ unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
+ }
+ else {
- idx = -1;
- }
+ idx = -1;
+ }
}
- return(idx);
+ return (idx);
} /* local_unpin_next_pinned_entry() */
-
/*****************************************************************************
* Function: lock_and_unlock_random_entries()
*
@@ -3436,28 +3159,23 @@ local_unpin_next_pinned_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-lock_and_unlock_random_entries(H5F_t * file_ptr,
- int min_idx,
- int max_idx,
- int min_count,
- int max_count)
+lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, int max_count)
{
int count;
int i;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( 0 <= min_count );
- HDassert( min_count < max_count );
+ HDassert(file_ptr);
+ HDassert(0 <= min_count);
+ HDassert(min_count < max_count);
count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert(min_count <= count);
+ HDassert(count <= max_count);
- for ( i = 0; i < count; i++ )
- {
+ for (i = 0; i < count; i++) {
lock_and_unlock_random_entry(file_ptr, min_idx, max_idx);
}
}
@@ -3466,7 +3184,6 @@ lock_and_unlock_random_entries(H5F_t * file_ptr,
} /* lock_and_unlock_random_entries() */
-
/*****************************************************************************
* Function: lock_and_unlock_random_entry()
*
@@ -3482,34 +3199,31 @@ lock_and_unlock_random_entries(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-lock_and_unlock_random_entry(H5F_t * file_ptr,
- int min_idx,
- int max_idx)
+lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx)
{
int idx;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( 0 <= min_idx );
- HDassert( min_idx < max_idx );
- HDassert( max_idx < NUM_DATA_ENTRIES );
- HDassert( max_idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert(0 <= min_idx);
+ HDassert(min_idx < max_idx);
+ HDassert(max_idx < NUM_DATA_ENTRIES);
+ HDassert(max_idx < virt_num_data_entries);
idx = (HDrand() % (max_idx - min_idx)) + min_idx;
- HDassert( min_idx <= idx );
- HDassert( idx <= max_idx );
+ HDassert(min_idx <= idx);
+ HDassert(idx <= max_idx);
- lock_entry(file_ptr, idx);
- unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, idx);
+ unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
}
return;
} /* lock_and_unlock_random_entry() */
-
/*****************************************************************************
* Function: lock_entry()
*
@@ -3530,51 +3244,45 @@ lock_and_unlock_random_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-lock_entry(H5F_t * file_ptr,
- int32_t idx)
+lock_entry(H5F_t *file_ptr, int32_t idx)
{
- struct datum * entry_ptr;
- H5C_cache_entry_t * cache_entry_ptr;
+ struct datum * entry_ptr;
+ H5C_cache_entry_t *cache_entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( ! (entry_ptr->locked) );
+ HDassert(!(entry_ptr->locked));
- cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr,
- &(types[0]), entry_ptr->base_addr,
- &entry_ptr->base_addr,
- H5AC__NO_FLAGS_SET);
+ cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr, &(types[0]), entry_ptr->base_addr,
+ &entry_ptr->base_addr, H5AC__NO_FLAGS_SET);
- if ( ( cache_entry_ptr != (void *)(&(entry_ptr->header)) ) ||
- ( entry_ptr->header.type != &(types[0]) ) ||
- ( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
- ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
+ if ((cache_entry_ptr != (void *)(&(entry_ptr->header))) || (entry_ptr->header.type != &(types[0])) ||
+ ((entry_ptr->len != entry_ptr->header.size) &&
+ (entry_ptr->local_len != entry_ptr->header.size)) ||
+ (entry_ptr->base_addr != entry_ptr->header.addr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", world_mpi_rank, FUNC);
}
- } else {
-
- entry_ptr->locked = TRUE;
+ }
+ else {
- }
+ entry_ptr->locked = TRUE;
+ }
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
}
return;
} /* lock_entry() */
-
/*****************************************************************************
* Function: mark_entry_dirty()
*
@@ -3591,44 +3299,40 @@ lock_entry(H5F_t * file_ptr,
static void
mark_entry_dirty(int32_t idx)
{
- herr_t result;
- struct datum * entry_ptr;
+ herr_t result;
+ struct datum *entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert ( entry_ptr->locked || entry_ptr->global_pinned );
- HDassert ( ! (entry_ptr->local_pinned) );
+ HDassert(entry_ptr->locked || entry_ptr->global_pinned);
+ HDassert(!(entry_ptr->local_pinned));
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
- result = H5AC_mark_entry_dirty( (void *)entry_ptr);
+ result = H5AC_mark_entry_dirty((void *)entry_ptr);
- if ( result < 0 ) {
+ if (result < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: error in H5AC_mark_entry_dirty().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: error in H5AC_mark_entry_dirty().\n", world_mpi_rank, FUNC);
}
}
- else if ( ! ( entry_ptr->locked ) )
- {
- global_dirty_pins++;
- }
+ else if (!(entry_ptr->locked)) {
+ global_dirty_pins++;
+ }
}
return;
} /* mark_entry_dirty() */
-
/*****************************************************************************
* Function: pin_entry()
*
@@ -3643,58 +3347,53 @@ mark_entry_dirty(int32_t idx)
*
*****************************************************************************/
static void
-pin_entry(H5F_t * file_ptr,
- int32_t idx,
- hbool_t global,
- hbool_t dirty)
+pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty)
{
- unsigned int flags = H5AC__PIN_ENTRY_FLAG;
- struct datum * entry_ptr;
+ unsigned int flags = H5AC__PIN_ENTRY_FLAG;
+ struct datum *entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert ( ! (entry_ptr->global_pinned) );
- HDassert ( ! (entry_ptr->local_pinned) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert(!(entry_ptr->global_pinned));
+ HDassert(!(entry_ptr->local_pinned));
+ HDassert(!(dirty && (!global)));
- lock_entry(file_ptr, idx);
-
- if ( dirty ) {
-
- flags |= H5AC__DIRTIED_FLAG;
- }
+ lock_entry(file_ptr, idx);
- unlock_entry(file_ptr, idx, flags);
+ if (dirty) {
- HDassert( (entry_ptr->header).is_pinned );
- HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- if ( global ) {
+ unlock_entry(file_ptr, idx, flags);
- entry_ptr->global_pinned = TRUE;
+ HDassert((entry_ptr->header).is_pinned);
+ HDassert((!dirty) || ((entry_ptr->header).is_dirty));
- global_pins++;
+ if (global) {
- } else {
+ entry_ptr->global_pinned = TRUE;
- entry_ptr->local_pinned = TRUE;
+ global_pins++;
+ }
+ else {
- local_pins++;
+ entry_ptr->local_pinned = TRUE;
- }
+ local_pins++;
+ }
}
return;
} /* pin_entry() */
-
/*****************************************************************************
* Function: pin_protected_entry()
*
@@ -3710,60 +3409,53 @@ pin_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-pin_protected_entry(int32_t idx,
- hbool_t global)
+pin_protected_entry(int32_t idx, hbool_t global)
{
- herr_t result;
- struct datum * entry_ptr;
+ herr_t result;
+ struct datum *entry_ptr;
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( entry_ptr->locked );
+ HDassert(entry_ptr->locked);
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- result = H5AC_pin_protected_entry((void *)entry_ptr);
+ result = H5AC_pin_protected_entry((void *)entry_ptr);
- if ( ( result < 0 ) ||
- ( entry_ptr->header.type != &(types[0]) ) ||
- ( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) )||
- ( entry_ptr->base_addr != entry_ptr->header.addr ) ||
- ( ! ( (entry_ptr->header).is_pinned ) ) ) {
+ if ((result < 0) || (entry_ptr->header.type != &(types[0])) ||
+ ((entry_ptr->len != entry_ptr->header.size) &&
+ (entry_ptr->local_len != entry_ptr->header.size)) ||
+ (entry_ptr->base_addr != entry_ptr->header.addr) || (!((entry_ptr->header).is_pinned))) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Error in H5AC_pin_protected entry().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", world_mpi_rank, FUNC);
}
}
- if ( global ) {
-
- entry_ptr->global_pinned = TRUE;
-
- global_pins++;
+ if (global) {
- } else {
+ entry_ptr->global_pinned = TRUE;
- entry_ptr->local_pinned = TRUE;
+ global_pins++;
+ }
+ else {
- local_pins++;
+ entry_ptr->local_pinned = TRUE;
- }
+ local_pins++;
+ }
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
}
return;
} /* pin_protected_entry() */
-
/*****************************************************************************
* Function: move_entry()
*
@@ -3781,33 +3473,31 @@ pin_protected_entry(int32_t idx,
*
*****************************************************************************/
static void
-move_entry(H5F_t * file_ptr,
- int32_t old_idx,
- int32_t new_idx)
+move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx)
{
- herr_t result;
+ herr_t result;
int tmp;
- size_t tmp_len;
- haddr_t old_addr = HADDR_UNDEF;
- haddr_t new_addr = HADDR_UNDEF;
- struct datum * old_entry_ptr;
- struct datum * new_entry_ptr;
+ size_t tmp_len;
+ haddr_t old_addr = HADDR_UNDEF;
+ haddr_t new_addr = HADDR_UNDEF;
+ struct datum *old_entry_ptr;
+ struct datum *new_entry_ptr;
- if ( ( nerrors == 0 ) && ( old_idx != new_idx ) ) {
+ if ((nerrors == 0) && (old_idx != new_idx)) {
- HDassert( file_ptr );
- HDassert( ( 0 <= old_idx ) && ( old_idx < NUM_DATA_ENTRIES ) );
- HDassert( old_idx < virt_num_data_entries );
- HDassert( ( 0 <= new_idx ) && ( new_idx < NUM_DATA_ENTRIES ) );
- HDassert( new_idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert((0 <= old_idx) && (old_idx < NUM_DATA_ENTRIES));
+ HDassert(old_idx < virt_num_data_entries);
+ HDassert((0 <= new_idx) && (new_idx < NUM_DATA_ENTRIES));
+ HDassert(new_idx < virt_num_data_entries);
old_entry_ptr = &(data[old_idx]);
new_entry_ptr = &(data[new_idx]);
- HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( !(old_entry_ptr->header.is_protected) );
- HDassert( !(old_entry_ptr->locked) );
- HDassert( old_entry_ptr->len == new_entry_ptr->len );
+ HDassert(((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
+ HDassert(!(old_entry_ptr->header.is_protected));
+ HDassert(!(old_entry_ptr->locked));
+ HDassert(old_entry_ptr->len == new_entry_ptr->len);
old_addr = old_entry_ptr->base_addr;
new_addr = new_entry_ptr->base_addr;
@@ -3819,8 +3509,8 @@ move_entry(H5F_t * file_ptr,
* now as it is possible that the rename will trigger a
* sync point.
*/
- if(old_entry_ptr->ver < new_entry_ptr->ver)
- old_entry_ptr->ver = new_entry_ptr->ver;
+ if (old_entry_ptr->ver < new_entry_ptr->ver)
+ old_entry_ptr->ver = new_entry_ptr->ver;
else
(old_entry_ptr->ver)++;
@@ -3834,60 +3524,56 @@ move_entry(H5F_t * file_ptr,
old_entry_ptr->index = new_entry_ptr->index;
new_entry_ptr->index = tmp;
- if(old_entry_ptr->local_len != new_entry_ptr->local_len) {
- tmp_len = old_entry_ptr->local_len;
- old_entry_ptr->local_len = new_entry_ptr->local_len;
- new_entry_ptr->local_len = tmp_len;
- } /* end if */
+ if (old_entry_ptr->local_len != new_entry_ptr->local_len) {
+ tmp_len = old_entry_ptr->local_len;
+ old_entry_ptr->local_len = new_entry_ptr->local_len;
+ new_entry_ptr->local_len = tmp_len;
+ } /* end if */
result = H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr);
- if ( ( result < 0 ) || ( old_entry_ptr->header.addr != new_addr ) ) {
+ if ((result < 0) || (old_entry_ptr->header.addr != new_addr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", world_mpi_rank, FUNC);
}
+ }
+ else {
- } else {
-
- HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert(((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
- if ( ! (old_entry_ptr->header.is_dirty) ) {
+ if (!(old_entry_ptr->header.is_dirty)) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t *aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) &&
+ (aux_ptr->dirty_bytes == 0))) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, new_idx,
- (int)(data[new_idx].header.is_dirty));
- }
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, FUNC,
+ new_idx, (int)(data[new_idx].header.is_dirty));
+ }
}
- } else {
+ }
+ else {
- HDassert( old_entry_ptr->header.is_dirty );
+ HDassert(old_entry_ptr->header.is_dirty);
}
}
}
} /* move_entry() */
-
/*****************************************************************************
*
* Function: reset_server_counts()
@@ -3905,10 +3591,10 @@ move_entry(H5F_t * file_ptr,
static hbool_t
reset_server_counts(void)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ hbool_t success = TRUE; /* will set to FALSE if appropriate. */
struct mssg_t mssg;
- if ( success ) {
+ if (success) {
/* compose the message */
mssg.req = REQ_RW_COUNT_RESET_CODE;
@@ -3921,51 +3607,42 @@ reset_server_counts(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ! recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE) ) {
+ if (!recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != 0 ) ||
- ( mssg.len != 0 ) ||
- ( mssg.ver != 0 ) ||
- ( mssg.count != 0 ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ }
+ else if ((mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) ||
+ (mssg.ver != 0) || (mssg.count != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Bad data in req r/w counter reset reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", world_mpi_rank, FUNC);
}
}
}
- return(success);
+ return (success);
} /* reset_server_counts() */
-
/*****************************************************************************
* Function: resize_entry()
*
@@ -3982,46 +3659,43 @@ reset_server_counts(void)
*
*****************************************************************************/
static void
-resize_entry(int32_t idx,
- size_t new_size)
+resize_entry(int32_t idx, size_t new_size)
{
- herr_t result;
- struct datum * entry_ptr;
+ herr_t result;
+ struct datum *entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( !(entry_ptr->locked) );
- HDassert( ( entry_ptr->global_pinned ) &&
- ( ! entry_ptr->local_pinned ) );
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
- HDassert( new_size > 0 );
- HDassert( new_size <= entry_ptr->len );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
+ HDassert(!(entry_ptr->locked));
+ HDassert((entry_ptr->global_pinned) && (!entry_ptr->local_pinned));
+ HDassert((entry_ptr->header.size == entry_ptr->len) ||
+ (entry_ptr->header.size == entry_ptr->local_len));
+ HDassert(new_size > 0);
+ HDassert(new_size <= entry_ptr->len);
- result = H5AC_resize_entry((void *)entry_ptr, new_size);
+ result = H5AC_resize_entry((void *)entry_ptr, new_size);
- if ( result < 0 ) {
+ if (result < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", world_mpi_rank, FUNC);
}
+ }
+ else {
- } else {
-
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( entry_ptr->header.is_dirty );
- HDassert( entry_ptr->header.size == new_size );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
+ HDassert(entry_ptr->header.is_dirty);
+ HDassert(entry_ptr->header.size == new_size);
- entry_ptr->dirty = TRUE;
- entry_ptr->local_len = new_size;
+ entry_ptr->dirty = TRUE;
+ entry_ptr->local_len = new_size;
/* touch up version. */
@@ -4033,7 +3707,6 @@ resize_entry(int32_t idx,
} /* resize_entry() */
-
/*****************************************************************************
*
* Function: setup_cache_for_test()
@@ -4054,103 +3727,94 @@ resize_entry(int32_t idx,
*
*****************************************************************************/
static hbool_t
-setup_cache_for_test(hid_t * fid_ptr,
- H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
- int metadata_write_strategy)
+setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, int metadata_write_strategy)
{
- hbool_t success = FALSE; /* will set to TRUE if appropriate. */
- hbool_t enable_rpt_fcn = FALSE;
- hid_t fid = -1;
+ hbool_t success = FALSE; /* will set to TRUE if appropriate. */
+ hbool_t enable_rpt_fcn = FALSE;
+ hid_t fid = -1;
H5AC_cache_config_t config;
H5AC_cache_config_t test_config;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
- haddr_t actual_base_addr;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ haddr_t actual_base_addr;
- HDassert ( fid_ptr != NULL );
- HDassert ( file_ptr_ptr != NULL );
- HDassert ( cache_ptr_ptr != NULL );
+ HDassert(fid_ptr != NULL);
+ HDassert(file_ptr_ptr != NULL);
+ HDassert(cache_ptr_ptr != NULL);
fid = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
/* Push API context */
H5CX_push();
- if ( fid < 0 ) {
+ if (fid < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ }
+ else if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
file_ptr = (H5F_t *)H5VL_object_verify(fid, H5I_FILE);
}
- if ( file_ptr == NULL ) {
+ if (file_ptr == NULL) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
cache_ptr = file_ptr->shared->cache;
}
- if ( cache_ptr == NULL ) {
+ if (cache_ptr == NULL) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", world_mpi_rank, FUNC);
}
- } else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) {
+ }
+ else if (cache_ptr->magic != H5C__H5C_T_MAGIC) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
cache_ptr->ignore_tags = TRUE;
- *fid_ptr = fid;
- *file_ptr_ptr = file_ptr;
- *cache_ptr_ptr = cache_ptr;
+ *fid_ptr = fid;
+ *file_ptr_ptr = file_ptr;
+ *cache_ptr_ptr = cache_ptr;
H5C_stats__reset(cache_ptr);
success = TRUE;
}
- if ( success ) {
+ if (success) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n",
- world_mpi_rank, FUNC);
-
- } else {
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", world_mpi_rank, FUNC);
+ }
+ else {
config.rpt_fcn_enabled = enable_rpt_fcn;
config.metadata_write_strategy = metadata_write_strategy;
- if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
- HDfprintf(stdout,
- "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
-
- } else if ( enable_rpt_fcn ) {
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
+ FUNC);
+ }
+ else if (enable_rpt_fcn) {
- HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n", world_mpi_rank, FUNC);
}
}
}
@@ -4160,32 +3824,29 @@ setup_cache_for_test(hid_t * fid_ptr,
* we can't do our usual checks in the serial case.
*/
- if ( success ) /* verify that the metadata write strategy is as expected */
+ if (success) /* verify that the metadata write strategy is as expected */
{
- if ( cache_ptr->aux_ptr == NULL ) {
+ if (cache_ptr->aux_ptr == NULL) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", world_mpi_rank, FUNC);
}
- } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic !=
- H5AC__H5AC_AUX_T_MAGIC ) {
+ }
+ else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n",
+ world_mpi_rank, FUNC);
}
- } else if( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy
- != metadata_write_strategy ) {
+ }
+ else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy != metadata_write_strategy) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n", world_mpi_rank,
+ FUNC);
}
}
}
@@ -4194,27 +3855,21 @@ setup_cache_for_test(hid_t * fid_ptr,
* when we get the current configuration.
*/
- if ( success ) {
+ if (success) {
test_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &test_config)
- != SUCCEED ) {
-
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n",
- world_mpi_rank, FUNC);
+ if (H5AC_get_cache_auto_resize_config(cache_ptr, &test_config) != SUCCEED) {
- } else if ( test_config.metadata_write_strategy !=
- metadata_write_strategy ) {
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", world_mpi_rank, FUNC);
+ }
+ else if (test_config.metadata_write_strategy != metadata_write_strategy) {
nerrors++;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "%d:%s: unexpected metadata_write_strategy.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: unexpected metadata_write_strategy.\n", world_mpi_rank, FUNC);
}
}
}
@@ -4224,85 +3879,76 @@ setup_cache_for_test(hid_t * fid_ptr,
* another flush. If the sync point done callback is set, this will
* cause a spurious failure.
*/
- if ( success ) { /* allocate space for test entries */
+ if (success) { /* allocate space for test entries */
- actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT,
- (hsize_t)(max_addr + BASE_ADDR));
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, (hsize_t)(max_addr + BASE_ADDR));
- if ( actual_base_addr == HADDR_UNDEF ) {
+ if (actual_base_addr == HADDR_UNDEF) {
success = FALSE;
- nerrors++;
+ nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n", world_mpi_rank, FUNC);
}
-
- } else if ( actual_base_addr > BASE_ADDR ) {
+ }
+ else if (actual_base_addr > BASE_ADDR) {
/* If this happens, must increase BASE_ADDR so that the
* actual_base_addr is <= BASE_ADDR. This should only happen
* if the size of the superblock is increase.
*/
success = FALSE;
- nerrors++;
+ nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n", world_mpi_rank, FUNC);
}
}
}
-
/* flush the file again -- space allocation dirtied superblock */
- if ( success ) {
+ if (success) {
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
}
#if DO_SYNC_AFTER_WRITE
- if ( success ) {
+ if (success) {
- if ( H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
+ if (H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5C_set_write_done_callback failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5C_set_write_done_callback failed.\n", world_mpi_rank, FUNC);
}
- }
+ }
}
#endif /* DO_SYNC_AFTER_WRITE */
- if ( success ) {
+ if (success) {
- if ( H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED ) {
+ if (H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank,
+ FUNC);
}
- }
+ }
}
- return(success);
+ return (success);
} /* setup_cache_for_test() */
-
/*****************************************************************************
*
* Function: verify_writes()
@@ -4333,34 +3979,32 @@ setup_cache_for_test(hid_t * fid_ptr,
static void
verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
{
- const hbool_t report = FALSE;
- hbool_t proceed = TRUE;
- unsigned u = 0;
+ const hbool_t report = FALSE;
+ hbool_t proceed = TRUE;
+ unsigned u = 0;
- HDassert( world_mpi_rank != world_server_mpi_rank );
- HDassert( ( num_writes == 0 ) ||
- ( written_entries_tbl != NULL ) );
+ HDassert(world_mpi_rank != world_server_mpi_rank);
+ HDassert((num_writes == 0) || (written_entries_tbl != NULL));
/* barrier to ensure that all other processes are ready to leave
* the sync point as well.
*/
- if ( proceed ) {
+ if (proceed) {
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
proceed = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: barrier 1 failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, FUNC);
}
}
}
- if(proceed)
+ if (proceed)
proceed = verify_total_writes(num_writes);
- while(proceed && u < num_writes) {
+ while (proceed && u < num_writes) {
proceed = verify_entry_writes(written_entries_tbl[u], 1);
u++;
}
@@ -4368,37 +4012,33 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
/* barrier to ensure that all other processes have finished verifying
* the number of writes before we reset the counters.
*/
- if ( proceed ) {
+ if (proceed) {
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
proceed = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: barrier 2 failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( proceed ) {
+ if (proceed) {
proceed = reset_server_counts();
}
/* if requested, display status of check to stdout */
- if ( ( report ) && ( file_mpi_rank == 0 ) ) {
-
- if ( proceed ) {
-
- HDfprintf(stdout, "%d:%s: verified %u writes.\n",
- world_mpi_rank, FUNC, num_writes);
+ if ((report) && (file_mpi_rank == 0)) {
- } else {
+ if (proceed) {
- HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n",
- world_mpi_rank, FUNC, num_writes);
+ HDfprintf(stdout, "%d:%s: verified %u writes.\n", world_mpi_rank, FUNC, num_writes);
+ }
+ else {
+ HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n", world_mpi_rank, FUNC, num_writes);
}
}
@@ -4408,15 +4048,14 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
* but I can think of at least one likely change to the metadata write
* strategies that will require it -- hence its insertion now.
*/
- if ( proceed ) {
+ if (proceed) {
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
proceed = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: barrier 3 failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -4425,7 +4064,6 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
} /* verify_writes() */
-
/*****************************************************************************
*
* Function: setup_rand()
@@ -4448,38 +4086,35 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
static void
setup_rand(void)
{
- hbool_t use_predefined_seeds = FALSE;
- int num_predefined_seeds = 3;
- unsigned predefined_seeds[3] = {18669, 89925, 12577};
- unsigned seed;
+ hbool_t use_predefined_seeds = FALSE;
+ int num_predefined_seeds = 3;
+ unsigned predefined_seeds[3] = {18669, 89925, 12577};
+ unsigned seed;
struct timeval tv;
- if ( ( use_predefined_seeds ) &&
- ( world_mpi_size == num_predefined_seeds ) ) {
+ if ((use_predefined_seeds) && (world_mpi_size == num_predefined_seeds)) {
- HDassert( world_mpi_rank >= 0 );
- HDassert( world_mpi_rank < world_mpi_size );
+ HDassert(world_mpi_rank >= 0);
+ HDassert(world_mpi_rank < world_mpi_size);
- seed = predefined_seeds[world_mpi_rank];
- HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
- world_mpi_rank, FUNC, seed);
+ seed = predefined_seeds[world_mpi_rank];
+ HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", world_mpi_rank, FUNC, seed);
fflush(stdout);
- HDsrand(seed);
-
- } else {
+ HDsrand(seed);
+ }
+ else {
- if ( HDgettimeofday(&tv, NULL) != 0 ) {
+ if (HDgettimeofday(&tv, NULL) != 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
seed = (unsigned)tv.tv_usec;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: seed = %d.\n",
- world_mpi_rank, FUNC, seed);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: seed = %d.\n", world_mpi_rank, FUNC, seed);
fflush(stdout);
}
HDsrand(seed);
@@ -4490,7 +4125,6 @@ setup_rand(void)
} /* setup_rand() */
-
/*****************************************************************************
*
* Function: take_down_cache()
@@ -4508,20 +4142,19 @@ setup_rand(void)
*
*****************************************************************************/
static hbool_t
-take_down_cache(hid_t fid, H5C_t * cache_ptr)
+take_down_cache(hid_t fid, H5C_t *cache_ptr)
{
hbool_t success = TRUE; /* will set to FALSE if appropriate. */
/* flush the file -- this should write out any remaining test
* entries in the cache.
*/
- if ( ( success ) && ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) ) {
+ if ((success) && (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
@@ -4530,65 +4163,58 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
* un-related to the test entries, and thereby corrupt our counts
* of entry writes.
*/
- if ( success ) {
+ if (success) {
- if ( H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED ) {
+ if (H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank,
+ FUNC);
}
}
-
-
}
/* close the file */
- if ( ( success ) && ( H5Fclose(fid) < 0 ) ) {
+ if ((success) && (H5Fclose(fid) < 0)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", world_mpi_rank, FUNC);
}
-
}
/* Pop API context */
H5CX_pop();
- if ( success ) {
+ if (success) {
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( HDremove(filenames[0]) < 0 ) {
+ if (HDremove(filenames[0]) < 0) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: HDremove() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: HDremove() failed.\n", world_mpi_rank, FUNC);
}
}
- } else {
+ }
+ else {
- /* verify that there have been no further writes of test
+ /* verify that there have been no further writes of test
* entries during the close
*/
success = verify_total_writes(0);
-
}
}
- return(success);
+ return (success);
} /* take_down_cache() */
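
take_down_cache() above performs the test's teardown: flush the file globally so any remaining test entries are written out, drop the sync-point-done callback so shutdown writes don't skew the server's counters, close the file, and let a single rank delete it while the others confirm no further test-entry writes happened. The callback step uses internal H5AC entry points, but the public-API part of the sequence can be sketched on its own; teardown() below is a hypothetical helper written for illustration, not code from this file.

/* Illustrative sketch of the public-API portion of the teardown above.
 * teardown() is a made-up helper, not part of t_cache.c.
 */
#include "hdf5.h"
#include <stdio.h> /* remove() */

static int
teardown(hid_t fid, const char *filename, int is_cleanup_rank)
{
    /* flush everything so no dirty test entries remain cached */
    if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
        return -1;

    /* close the file */
    if (H5Fclose(fid) < 0)
        return -1;

    /* only one rank deletes the file, mirroring the server-rank check above */
    if (is_cleanup_rank && remove(filename) < 0)
        return -1;

    return 0;
}
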
-
/*****************************************************************************
* Function: verify_entry_reads
*
@@ -4610,14 +4236,13 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
*-------------------------------------------------------------------------
*/
static hbool_t
-verify_entry_reads(haddr_t addr,
- int expected_entry_reads)
+verify_entry_reads(haddr_t addr, int expected_entry_reads)
{
- hbool_t success = TRUE;
- int reported_entry_reads = 0;
+ hbool_t success = TRUE;
+ int reported_entry_reads = 0;
struct mssg_t mssg;
- if ( success ) {
+ if (success) {
/* compose the message */
mssg.req = REQ_ENTRY_READS_CODE;
@@ -4630,73 +4255,63 @@ verify_entry_reads(haddr_t addr,
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ! recv_mssg(&mssg, REQ_ENTRY_READS_RPLY_CODE) ) {
+ if (!recv_mssg(&mssg, REQ_ENTRY_READS_RPLY_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ( mssg.req != REQ_ENTRY_READS_RPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != addr ) ||
- ( mssg.len != 0 ) ||
- ( mssg.ver != 0 ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ if ((mssg.req != REQ_ENTRY_READS_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != addr) || (mssg.len != 0) || (mssg.ver != 0) ||
+ (mssg.magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
H5_CHECKED_ASSIGN(reported_entry_reads, int, mssg.count, unsigned);
}
}
- if ( success ) {
+ if (success) {
- if ( reported_entry_reads != expected_entry_reads ) {
+ if (reported_entry_reads != expected_entry_reads) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: rep/exp entry 0x%" PRIxHADDR
- " reads mismatch (%d/%d).\n",
- world_mpi_rank, FUNC, addr,
- reported_entry_reads, expected_entry_reads);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n",
+ world_mpi_rank, FUNC, addr, reported_entry_reads, expected_entry_reads);
}
}
}
- return(success);
+ return (success);
} /* verify_entry_reads() */
-
/*****************************************************************************
* Function: verify_entry_writes
*
@@ -4718,14 +4333,13 @@ verify_entry_reads(haddr_t addr,
*-------------------------------------------------------------------------
*/
static hbool_t
-verify_entry_writes(haddr_t addr,
- int expected_entry_writes)
+verify_entry_writes(haddr_t addr, int expected_entry_writes)
{
- hbool_t success = TRUE;
- int reported_entry_writes = 0;
+ hbool_t success = TRUE;
+ int reported_entry_writes = 0;
struct mssg_t mssg;
- if ( success ) {
+ if (success) {
/* compose the message */
mssg.req = REQ_ENTRY_WRITES_CODE;
@@ -4738,72 +4352,63 @@ verify_entry_writes(haddr_t addr,
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ! recv_mssg(&mssg, REQ_ENTRY_WRITES_RPLY_CODE) ) {
+ if (!recv_mssg(&mssg, REQ_ENTRY_WRITES_RPLY_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ( mssg.req != REQ_ENTRY_WRITES_RPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != addr ) ||
- ( mssg.len != 0 ) ||
- ( mssg.ver != 0 ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ if ((mssg.req != REQ_ENTRY_WRITES_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != addr) || (mssg.len != 0) || (mssg.ver != 0) ||
+ (mssg.magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
H5_CHECKED_ASSIGN(reported_entry_writes, int, mssg.count, unsigned);
}
}
- if ( success ) {
+ if (success) {
- if ( reported_entry_writes != expected_entry_writes ) {
+ if (reported_entry_writes != expected_entry_writes) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n",
- world_mpi_rank, FUNC, (long long)addr,
- reported_entry_writes, expected_entry_writes);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n", world_mpi_rank,
+ FUNC, (long long)addr, reported_entry_writes, expected_entry_writes);
}
}
}
- return(success);
+ return (success);
} /* verify_entry_writes() */
-
/*****************************************************************************
*
* Function: verify_total_reads()
@@ -4826,11 +4431,11 @@ verify_entry_writes(haddr_t addr,
static hbool_t
verify_total_reads(int expected_total_reads)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- long reported_total_reads;
+ hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ long reported_total_reads;
struct mssg_t mssg;
- if ( success ) {
+ if (success) {
/* compose the message */
mssg.req = REQ_TTL_READS_CODE;
@@ -4843,68 +4448,59 @@ verify_total_reads(int expected_total_reads)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ! recv_mssg(&mssg, REQ_TTL_READS_RPLY_CODE) ) {
+ if (!recv_mssg(&mssg, REQ_TTL_READS_RPLY_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != REQ_TTL_READS_RPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != 0 ) ||
- ( mssg.len != 0 ) ||
- ( mssg.ver != 0 ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ }
+ else if ((mssg.req != REQ_TTL_READS_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) ||
+ (mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
reported_total_reads = mssg.count;
}
}
- if ( success ) {
+ if (success) {
- if ( reported_total_reads != expected_total_reads ) {
+ if (reported_total_reads != expected_total_reads) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: reported/expected total reads mismatch (%ld/%d).\n",
- world_mpi_rank, FUNC,
- reported_total_reads, expected_total_reads);
-
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%d).\n", world_mpi_rank,
+ FUNC, reported_total_reads, expected_total_reads);
}
}
}
- return(success);
+ return (success);
} /* verify_total_reads() */
-
/*****************************************************************************
*
* Function: verify_total_writes()
@@ -4927,11 +4523,11 @@ verify_total_reads(int expected_total_reads)
static hbool_t
verify_total_writes(unsigned expected_total_writes)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- unsigned reported_total_writes;
+ hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ unsigned reported_total_writes;
struct mssg_t mssg;
- if ( success ) {
+ if (success) {
/* compose the message */
mssg.req = REQ_TTL_WRITES_CODE;
@@ -4944,67 +4540,59 @@ verify_total_writes(unsigned expected_total_writes)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if (!send_mssg(&mssg, FALSE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
- if ( success ) {
+ if (success) {
- if ( ! recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE) ) {
+ if (!recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != REQ_TTL_WRITES_RPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != 0 ) ||
- ( mssg.len != 0 ) ||
- ( mssg.ver != 0 ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ }
+ else if ((mssg.req != REQ_TTL_WRITES_RPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) ||
+ (mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
reported_total_writes = mssg.count;
}
}
- if ( success ) {
+ if (success) {
- if ( reported_total_writes != expected_total_writes ) {
+ if (reported_total_writes != expected_total_writes) {
nerrors++;
success = FALSE;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: reported/expected total writes mismatch (%u/%u).\n",
- world_mpi_rank, FUNC,
- reported_total_writes, expected_total_writes);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: reported/expected total writes mismatch (%u/%u).\n", world_mpi_rank,
+ FUNC, reported_total_writes, expected_total_writes);
}
}
}
- return(success);
+ return (success);
} /* verify_total_writes() */
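
verify_entry_reads(), verify_entry_writes(), verify_total_reads() and verify_total_writes() all repeat one request/reply handshake with the counting server: compose a mssg_t carrying the request code, send it, receive the matching reply code, validate the reply header fields, and compare the reported count against the expectation. The stand-alone sketch below shows that shared pattern; the struct, codes and fake_server_reply() stand-in are invented for illustration and are not the test's actual mssg_t protocol.

/* Sketch of the request/reply counter check used by the verify_*() helpers.
 * All names and codes here are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_TTL_WRITES  1
#define RPLY_TTL_WRITES 2
#define REPLY_MAGIC     0x1234u
#define SERVER_RANK     0

struct reply {
    int      code;  /* must equal the expected reply code */
    int      src;   /* must be the server's rank          */
    unsigned count; /* counter value the server reports   */
    unsigned magic; /* sanity value                       */
};

/* stand-in for send_mssg()/recv_mssg(): pretend the server saw 4 writes */
static bool
fake_server_reply(int req_code, struct reply *out)
{
    if (req_code != REQ_TTL_WRITES)
        return false;
    out->code  = RPLY_TTL_WRITES;
    out->src   = SERVER_RANK;
    out->count = 4;
    out->magic = REPLY_MAGIC;
    return true;
}

/* the shared pattern: ask, validate the header, then compare the count */
static bool
verify_count(int req_code, int reply_code, unsigned expected)
{
    struct reply r;

    if (!fake_server_reply(req_code, &r))
        return false;
    if (r.code != reply_code || r.src != SERVER_RANK || r.magic != REPLY_MAGIC) {
        fprintf(stderr, "bad data in reply\n");
        return false;
    }
    return r.count == expected;
}

int
main(void)
{
    printf("expected 4: %s\n", verify_count(REQ_TTL_WRITES, RPLY_TTL_WRITES, 4) ? "ok" : "mismatch");
    printf("expected 7: %s\n", verify_count(REQ_TTL_WRITES, RPLY_TTL_WRITES, 7) ? "ok" : "mismatch");
    return 0;
}
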
-
/*****************************************************************************
* Function: unlock_entry()
*
@@ -5024,64 +4612,55 @@ verify_total_writes(unsigned expected_total_writes)
*
*****************************************************************************/
static void
-unlock_entry(H5F_t * file_ptr,
- int32_t idx,
- unsigned int flags)
+unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags)
{
- herr_t dirtied;
- herr_t result;
- struct datum * entry_ptr;
+ herr_t dirtied;
+ herr_t result;
+ struct datum *entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( entry_ptr->locked );
+ HDassert(entry_ptr->locked);
- dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG );
+ dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG);
- if ( dirtied ) {
+ if (dirtied) {
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
}
- result = H5AC_unprotect(file_ptr, &(types[0]),
- entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
+ result = H5AC_unprotect(file_ptr, &(types[0]), entry_ptr->base_addr, (void *)(&(entry_ptr->header)),
+ flags);
- if ( ( result < 0 ) ||
- ( entry_ptr->header.type != &(types[0]) ) ||
- ( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
- ( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
+ if ((result < 0) || (entry_ptr->header.type != &(types[0])) ||
+ ((entry_ptr->len != entry_ptr->header.size) &&
+ (entry_ptr->local_len != entry_ptr->header.size)) ||
+ (entry_ptr->base_addr != entry_ptr->header.addr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: error in H5AC_unprotect().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: error in H5AC_unprotect().\n", world_mpi_rank, FUNC);
}
- } else {
+ }
+ else {
entry_ptr->locked = FALSE;
+ }
- }
-
- HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
+ HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
- if ( ( (flags & H5AC__DIRTIED_FLAG) != 0 ) &&
- ( (flags & H5C__DELETED_FLAG) == 0 ) &&
- ( ! ( ( ( world_mpi_rank == 0 ) && ( entry_ptr->flushed ) )
- ||
- ( ( world_mpi_rank != 0 ) && ( entry_ptr->cleared ) )
- )
- )
- ) {
- HDassert( entry_ptr->header.is_dirty );
- HDassert( entry_ptr->dirty );
+ if (((flags & H5AC__DIRTIED_FLAG) != 0) && ((flags & H5C__DELETED_FLAG) == 0) &&
+ (!(((world_mpi_rank == 0) && (entry_ptr->flushed)) ||
+ ((world_mpi_rank != 0) && (entry_ptr->cleared))))) {
+ HDassert(entry_ptr->header.is_dirty);
+ HDassert(entry_ptr->dirty);
}
}
@@ -5089,7 +4668,6 @@ unlock_entry(H5F_t * file_ptr,
} /* unlock_entry() */
-
/*****************************************************************************
* Function: unpin_entry()
*
@@ -5109,84 +4687,74 @@ unlock_entry(H5F_t * file_ptr,
*
*****************************************************************************/
static void
-unpin_entry(H5F_t * file_ptr,
- int32_t idx,
- hbool_t global,
- hbool_t dirty,
- hbool_t via_unprotect)
+unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect)
{
- herr_t result;
- unsigned int flags = H5AC__UNPIN_ENTRY_FLAG;
- struct datum * entry_ptr;
+ herr_t result;
+ unsigned int flags = H5AC__UNPIN_ENTRY_FLAG;
+ struct datum *entry_ptr;
- if ( nerrors == 0 ) {
+ if (nerrors == 0) {
- HDassert( file_ptr );
- HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) );
- HDassert( idx < virt_num_data_entries );
+ HDassert(file_ptr);
+ HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES));
+ HDassert(idx < virt_num_data_entries);
entry_ptr = &(data[idx]);
- HDassert( (entry_ptr->header).is_pinned );
- HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
- HDassert ( ( global && entry_ptr->global_pinned ) ||
- ( ! global && entry_ptr->local_pinned ) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert((entry_ptr->header).is_pinned);
+ HDassert(!(entry_ptr->global_pinned && entry_ptr->local_pinned));
+ HDassert((global && entry_ptr->global_pinned) || (!global && entry_ptr->local_pinned));
+ HDassert(!(dirty && (!global)));
- if ( via_unprotect ) {
+ if (via_unprotect) {
- lock_entry(file_ptr, idx);
+ lock_entry(file_ptr, idx);
- if ( dirty ) {
-
- flags |= H5AC__DIRTIED_FLAG;
- }
+ if (dirty) {
- unlock_entry(file_ptr, idx, flags);
-
- } else {
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- if ( dirty ) {
+ unlock_entry(file_ptr, idx, flags);
+ }
+ else {
- mark_entry_dirty(idx);
+ if (dirty) {
- }
+ mark_entry_dirty(idx);
+ }
- result = H5AC_unpin_entry(entry_ptr);
+ result = H5AC_unpin_entry(entry_ptr);
- if ( result < 0 ) {
+ if (result < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", world_mpi_rank, FUNC);
}
+ }
}
- }
-
- HDassert( ! ((entry_ptr->header).is_pinned) );
- if ( global ) {
+ HDassert(!((entry_ptr->header).is_pinned));
- entry_ptr->global_pinned = FALSE;
+ if (global) {
- } else {
-
- entry_ptr->local_pinned = FALSE;
+ entry_ptr->global_pinned = FALSE;
+ }
+ else {
- }
+ entry_ptr->local_pinned = FALSE;
+ }
}
return;
} /* unpin_entry() */
-
/*****************************************************************************/
/****************************** test functions *******************************/
/*****************************************************************************/
-
/*****************************************************************************
*
* Function: server_smoke_check()
@@ -5203,11 +4771,11 @@ unpin_entry(H5F_t * file_ptr,
static hbool_t
server_smoke_check(void)
{
- hbool_t success = TRUE;
- int max_nerrors;
+ hbool_t success = TRUE;
+ int max_nerrors;
struct mssg_t mssg;
- if ( world_mpi_rank == 0 ) {
+ if (world_mpi_rank == 0) {
TESTING("server smoke check");
}
@@ -5216,15 +4784,14 @@ server_smoke_check(void)
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occurred in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5237,101 +4804,93 @@ server_smoke_check(void)
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = data[world_mpi_rank].base_addr;
H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t);
- mssg.ver = ++(data[world_mpi_rank].ver);
- mssg.count = 0;
- mssg.magic = MSSG_MAGIC;
+ mssg.ver = ++(data[world_mpi_rank].ver);
+ mssg.count = 0;
+ mssg.magic = MSSG_MAGIC;
- if ( ! ( success = send_mssg(&mssg, FALSE) ) ) {
+ if (!(success = send_mssg(&mssg, FALSE))) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, FUNC);
}
}
#if DO_WRITE_REQ_ACK
/* try to receive the write ack from the server */
- if ( success ) {
+ if (success) {
success = recv_mssg(&mssg, WRITE_REQ_ACK_CODE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
/* verify that we received the expected ack message */
- if ( success ) {
+ if (success) {
- if ( ( mssg.req != WRITE_REQ_ACK_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != data[world_mpi_rank].base_addr ) ||
- ( mssg.len != data[world_mpi_rank].len ) ||
- ( mssg.ver != data[world_mpi_rank].ver ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ if ((mssg.req != WRITE_REQ_ACK_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != data[world_mpi_rank].base_addr) ||
+ (mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) ||
+ (mssg.magic != MSSG_MAGIC)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, FUNC);
}
}
}
#endif /* DO_WRITE_REQ_ACK */
- do_sync();
+ do_sync();
- /* barrier to allow all writes to complete */
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ /* barrier to allow all writes to complete */
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: barrier 1 failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, FUNC);
}
}
/* verify that the expected entries have been written, the total */
- if ( success ) {
+ if (success) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
}
- if ( success ) {
+ if (success) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
}
- if ( success ) {
+ if (success) {
success = verify_total_writes((unsigned)(world_mpi_size - 1));
}
- if ( success ) {
+ if (success) {
success = verify_total_reads(0);
}
- /* barrier to allow all writes to complete */
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ /* barrier to allow all writes to complete */
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 2 failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, FUNC);
}
}
@@ -5342,148 +4901,138 @@ server_smoke_check(void)
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = data[world_mpi_rank].base_addr;
H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t);
- mssg.ver = 0; /* bogus -- should be corrected by server */
- mssg.count = 0;
- mssg.magic = MSSG_MAGIC;
+ mssg.ver = 0; /* bogus -- should be corrected by server */
+ mssg.count = 0;
+ mssg.magic = MSSG_MAGIC;
- if ( success ) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, FUNC);
}
}
}
/* try to receive the reply from the server */
- if ( success ) {
+ if (success) {
success = recv_mssg(&mssg, READ_REQ_REPLY_CODE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, FUNC);
}
}
}
/* verify that we got the expected result */
- if ( success ) {
+ if (success) {
- if ( ( mssg.req != READ_REQ_REPLY_CODE ) ||
- ( mssg.src != world_server_mpi_rank ) ||
- ( mssg.dest != world_mpi_rank ) ||
- ( mssg.base_addr != data[world_mpi_rank].base_addr ) ||
- ( mssg.len != data[world_mpi_rank].len ) ||
- ( mssg.ver != data[world_mpi_rank].ver ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ if ((mssg.req != READ_REQ_REPLY_CODE) || (mssg.src != world_server_mpi_rank) ||
+ (mssg.dest != world_mpi_rank) || (mssg.base_addr != data[world_mpi_rank].base_addr) ||
+ (mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) ||
+ (mssg.magic != MSSG_MAGIC)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, FUNC);
}
}
}
- /* barrier to allow all writes to complete */
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ /* barrier to allow all writes to complete */
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: barrier 3 failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, FUNC);
}
}
/* verify that the expected entries have been read, and the total */
- if ( success ) {
+ if (success) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
}
- if ( success ) {
+ if (success) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 1);
}
- if ( success ) {
+ if (success) {
success = verify_total_writes((unsigned)(world_mpi_size - 1));
}
- if ( success ) {
+ if (success) {
success = verify_total_reads(world_mpi_size - 1);
}
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 4 failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 4 failed.\n", world_mpi_rank, FUNC);
}
}
/* reset the counters */
- if ( success ) {
+ if (success) {
success = reset_server_counts();
}
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 5 failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 5 failed.\n", world_mpi_rank, FUNC);
}
}
/* verify that the counters have been reset */
- if ( success ) {
+ if (success) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 0);
}
- if ( success ) {
+ if (success) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
}
- if ( success ) {
+ if (success) {
success = verify_total_writes(0);
}
- if ( success ) {
+ if (success) {
success = verify_total_reads(0);
}
- if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
+ if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
success = FALSE;
nerrors++;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout, "%d:%s: barrier 6 failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: barrier 6 failed.\n", world_mpi_rank, FUNC);
}
}
@@ -5492,22 +5041,21 @@ server_smoke_check(void)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( success ) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5515,26 +5063,25 @@ server_smoke_check(void)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
+ if (world_mpi_rank == 0) {
- PASSED();
+ if (max_nerrors == 0) {
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* server_smoke_check() */
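
server_smoke_check() and the smoke checks that follow all run the same rank layout: one process acts as the counting server while every other process performs cache operations, synchronizes at barriers, and finally sends a DONE message before the run is scored via get_max_nerrors(). A bare MPI sketch of that layout follows; the tag, payload, and placing the server on the last rank are illustrative choices, not the test's actual mssg_t protocol or server-rank selection.

/* Illustrative rank layout only: one "server" rank collects DONE messages
 * from every "client" rank.  Not code from t_cache.c.
 */
#include <mpi.h>
#include <stdio.h>

#define DONE_TAG 99

int
main(int argc, char **argv)
{
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == size - 1) {
        /* "server": wait for a DONE message from every client */
        int dummy, received = 0;
        while (received < size - 1) {
            MPI_Recv(&dummy, 1, MPI_INT, MPI_ANY_SOURCE, DONE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            received++;
        }
        printf("server: all %d clients reported done\n", received);
    }
    else {
        /* "client": do the cache work here, then report completion */
        int done = 1;
        MPI_Send(&done, 1, MPI_INT, size - 1, DONE_TAG, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}
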
-
/*****************************************************************************
*
* Function: smoke_check_1()
@@ -5551,100 +5098,92 @@ server_smoke_check(void)
static hbool_t
smoke_check_1(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #1 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #1 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #1 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occurred in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
else /* run the clients */
{
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
}
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
}
/* Move the first half of the entries... */
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
/* ...and then move them back. */
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5653,10 +5192,9 @@ smoke_check_1(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -5664,22 +5202,21 @@ smoke_check_1(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5687,26 +5224,25 @@ smoke_check_1(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
+ if (world_mpi_rank == 0) {
- PASSED();
+ if (max_nerrors == 0) {
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_1() */
-
/*****************************************************************************
*
* Function: smoke_check_2()
@@ -5726,147 +5262,126 @@ smoke_check_1(int metadata_write_strategy)
static hbool_t
smoke_check_2(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #2 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #2 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #2 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occured in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
else /* run the clients */
{
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- /* Make sure we don't step on any locally pinned entries */
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ for (i = 0; i < (virt_num_data_entries / 2); i += 61) {
+ /* Make sure we don't step on any locally pinned entries */
+ if (data[i].local_pinned) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- pin_entry(file_ptr, i, TRUE, FALSE);
- }
+ pin_entry(file_ptr, i, TRUE, FALSE);
+ }
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
- 0, 100);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
- 0, 3);
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i -= 2) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), 0, 100);
+ local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 4), 0, 3);
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 10),
- 0, 100);
+ for (i = 0; i < (virt_num_data_entries / 2); i += 2) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 10), 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
/* Move the first half of the entries... */
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- ((virt_num_data_entries / 50) - 1),
- 0, 100);
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, ((virt_num_data_entries / 50) - 1), 0, 100);
}
/* ...and then move them back. */
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 100),
- 0, 100);
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 100), 0, 100);
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );
+ for (i = 0; i < (virt_num_data_entries / 2); i += 61) {
+ hbool_t via_unprotect = ((((unsigned)i) & 0x01) == 0);
+ hbool_t dirty = ((((unsigned)i) & 0x02) == 0);
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
- }
+ unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ }
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5875,10 +5390,9 @@ smoke_check_2(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -5886,22 +5400,21 @@ smoke_check_2(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -5909,26 +5422,25 @@ smoke_check_2(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
+ if (world_mpi_rank == 0) {
- PASSED();
+ if (max_nerrors == 0) {
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_2() */
-
/*****************************************************************************
*
* Function: smoke_check_3()
@@ -5951,182 +5463,158 @@ smoke_check_2(int metadata_write_strategy)
static hbool_t
smoke_check_3(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- int min_count;
- int max_count;
- int min_idx;
- int max_idx;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ int min_count;
+ int max_count;
+ int min_idx;
+ int max_idx;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #3 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #3 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #3 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occured in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
else /* run the clients */
{
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
min_count = 100 / ((file_mpi_rank + 1) * (file_mpi_rank + 1));
max_count = min_count + 50;
- for ( i = 0; i < (virt_num_data_entries / 4); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries / 4); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
}
}
-
min_count = 100 / ((file_mpi_rank + 2) * (file_mpi_rank + 2));
max_count = min_count + 50;
- for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
- {
+ for (i = (virt_num_data_entries / 4); i < (virt_num_data_entries / 2); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 59 == 0 ) {
-
- hbool_t dirty = ( (i % 2) == 0);
-
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ if (i % 59 == 0) {
- pin_entry(file_ptr, i, TRUE, dirty);
+ hbool_t dirty = ((i % 2) == 0);
- HDassert( !dirty || data[i].header.is_dirty );
- HDassert( data[i].header.is_pinned );
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
- }
+ if (data[i].local_pinned) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- if ( i > 100 ) {
+ pin_entry(file_ptr, i, TRUE, dirty);
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ HDassert(!dirty || data[i].header.is_dirty);
+ HDassert(data[i].header.is_pinned);
+ HDassert(data[i].global_pinned);
+ HDassert(!data[i].local_pinned);
}
- local_pin_and_unpin_random_entries(file_ptr, 0,
- virt_num_data_entries / 4,
- 0, (file_mpi_rank + 2));
+ if (i > 100) {
- }
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
+ }
+ local_pin_and_unpin_random_entries(file_ptr, 0, virt_num_data_entries / 4, 0,
+ (file_mpi_rank + 2));
+ }
- /* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ /* flush the file to be sure that we have no problems flushing
+ * pinned entries
+ */
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
-
min_idx = 0;
- max_idx = ((virt_num_data_entries / 10) /
- ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1;
- if ( max_idx <= min_idx ) {
+ max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1;
+ if (max_idx <= min_idx) {
max_idx = min_idx + 10;
}
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) {
- hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
+ hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0);
+ hbool_t dirty = ((((unsigned)i) & 0x04) == 0);
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
+ HDassert(data[i].global_pinned);
+ HDassert(!data[i].local_pinned);
- unpin_entry(file_ptr, i, TRUE, dirty,
- via_unprotect);
- }
- if ( i % 2 == 0 ) {
+ unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ }
+ if (i % 2 == 0) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- virt_num_data_entries / 2,
- 0, 2);
- lock_and_unlock_random_entries(file_ptr,
- min_idx, max_idx, 0, 100);
- }
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ local_pin_and_unpin_random_entries(file_ptr, 0, virt_num_data_entries / 2, 0, 2);
+ lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100);
+ }
}
min_idx = 0;
- max_idx = ((virt_num_data_entries / 10) /
- ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1;
- if ( max_idx <= min_idx ) {
+ max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1;
+ if (max_idx <= min_idx) {
max_idx = min_idx + 10;
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
- min_idx, max_idx, 0, 100);
+ for (i = 0; i < (virt_num_data_entries / 2); i += 2) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100);
}
/* we can't move pinned entries, so release any local pins now. */
@@ -6136,25 +5624,19 @@ smoke_check_3(int metadata_write_strategy)
max_count = min_count + 100;
/* move the first half of the entries... */
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
- min_count, max_count);
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), min_count, max_count);
}
/* ...and then move them back. */
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
- min_count, max_count);
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 40), min_count, max_count);
}
/* finally, do some dirty lock/unlocks while we give the cache
@@ -6163,33 +5645,28 @@ smoke_check_3(int metadata_write_strategy)
min_count = 200 / ((file_mpi_rank + 1) * (file_mpi_rank + 1));
max_count = min_count + 100;
- for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
- {
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 2),
- 0, 5);
+ for (i = 0; i < (virt_num_data_entries / 2); i += 2) {
+ local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 2), 0, 5);
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
}
}
/* release any local pins before we take down the cache. */
local_unpin_all_entries(file_ptr, FALSE);
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6198,10 +5675,9 @@ smoke_check_3(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -6209,23 +5685,21 @@ smoke_check_3(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
-
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6233,26 +5707,25 @@ smoke_check_3(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
+ if (world_mpi_rank == 0) {
- if ( max_nerrors == 0 ) {
+ if (max_nerrors == 0) {
- PASSED();
-
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_3() */
-
/*****************************************************************************
*
* Function: smoke_check_4()
@@ -6275,210 +5748,184 @@ smoke_check_3(int metadata_write_strategy)
static hbool_t
smoke_check_4(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- int min_count;
- int max_count;
- int min_idx;
- int max_idx;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ int min_count;
+ int max_count;
+ int min_idx;
+ int max_idx;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #4 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #4 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #4 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occurred in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
else /* run the clients */
{
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
-
min_count = 100 * (file_mpi_rank % 4);
max_count = min_count + 50;
- for ( i = 0; i < (virt_num_data_entries / 4); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries / 4); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
}
}
min_count = 10 * (file_mpi_rank % 4);
max_count = min_count + 100;
- for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
- {
- if ( i % 2 == 0 ) {
+ for (i = (virt_num_data_entries / 4); i < (virt_num_data_entries / 2); i++) {
+ if (i % 2 == 0) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
+ }
+ else {
- } else {
-
- /* Insert some entries pinned, and then unpin them
- * immediately. We have tested pinned entries elsewhere,
- * so it should be sufficient to verify that the
- * entries are in fact pinned (which unpin_entry() should do).
- */
+ /* Insert some entries pinned, and then unpin them
+ * immediately. We have tested pinned entries elsewhere,
+ * so it should be sufficient to verify that the
+ * entries are in fact pinned (which unpin_entry() should do).
+ */
insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG);
unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
- }
+ }
- if ( i % 59 == 0 ) {
+ if (i % 59 == 0) {
- hbool_t dirty = ( (i % 2) == 0);
+ hbool_t dirty = ((i % 2) == 0);
- if ( data[i].local_pinned ) {
+ if (data[i].local_pinned) {
unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
}
pin_entry(file_ptr, i, TRUE, dirty);
- HDassert( !dirty || data[i].header.is_dirty );
- HDassert( data[i].header.is_pinned );
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
+ HDassert(!dirty || data[i].header.is_dirty);
+ HDassert(data[i].header.is_pinned);
+ HDassert(data[i].global_pinned);
+ HDassert(!data[i].local_pinned);
}
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
}
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
- 0, (file_mpi_rank + 2));
+ local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 4), 0,
+ (file_mpi_rank + 2));
}
-
/* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ * pinned entries
+ */
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
-
min_idx = 0;
- max_idx = (((virt_num_data_entries / 10) / 4) *
- ((file_mpi_rank % 4) + 1)) - 1;
+ max_idx = (((virt_num_data_entries / 10) / 4) * ((file_mpi_rank % 4) + 1)) - 1;
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) {
- hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
+ hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0);
+ hbool_t dirty = ((((unsigned)i) & 0x04) == 0);
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
+ HDassert(data[i].global_pinned);
+ HDassert(!data[i].local_pinned);
unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
}
- if ( i % 2 == 0 ) {
+ if (i % 2 == 0) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr,
- min_idx, max_idx, 0, 100);
- }
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100);
+ }
}
min_idx = 0;
- max_idx = (((virt_num_data_entries / 10) / 8) *
- ((file_mpi_rank % 4) + 1)) - 1;
+ max_idx = (((virt_num_data_entries / 10) / 8) * ((file_mpi_rank % 4) + 1)) - 1;
- for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
- min_idx, max_idx, 0, 100);
+ for (i = 0; i < (virt_num_data_entries / 2); i += 2) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
min_count = 10 * (file_mpi_rank % 4);
max_count = min_count + 100;
/* move the first half of the entries... */
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
- min_count, max_count);
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), min_count, max_count);
}
/* ...and then move them back. */
- for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
- min_count, max_count);
+ for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 40), min_count, max_count);
}
/* finally, do some dirty lock/unlocks while we give the cache
@@ -6487,26 +5934,23 @@ smoke_check_4(int metadata_write_strategy)
min_count = 100 * (file_mpi_rank % 4);
max_count = min_count + 100;
- for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
- {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ for (i = 0; i < (virt_num_data_entries / 2); i += 2) {
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- if ( i > 100 ) {
+ if (i > 100) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
- min_count, max_count);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count);
}
}
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6515,10 +5959,9 @@ smoke_check_4(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -6526,23 +5969,21 @@ smoke_check_4(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
-
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6550,26 +5991,25 @@ smoke_check_4(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
+ if (world_mpi_rank == 0) {
- if ( max_nerrors == 0 ) {
+ if (max_nerrors == 0) {
- PASSED();
-
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_4() */
-
/*****************************************************************************
*
* Function: smoke_check_5()
@@ -6587,136 +6027,122 @@ smoke_check_4(int metadata_write_strategy)
static hbool_t
smoke_check_5(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #5 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #5 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #5 -- unknown md write strategy");
}
- break;
+ break;
}
-
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occured in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
else /* run the clients */
{
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
}
- /* flush the file so we can lock known clean entries. */
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ /* flush the file so we can lock known clean entries. */
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
- for ( i = 0; i < (virt_num_data_entries / 4); i++ )
- {
- lock_entry(file_ptr, i);
+ for (i = 0; i < (virt_num_data_entries / 4); i++) {
+ lock_entry(file_ptr, i);
- if ( i % 2 == 0 )
- {
- mark_entry_dirty(i);
- }
+ if (i % 2 == 0) {
+ mark_entry_dirty(i);
+ }
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 2 == 1 )
- {
- if ( i % 4 == 1 ) {
+ if (i % 2 == 1) {
+ if (i % 4 == 1) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- }
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ }
- expunge_entry(file_ptr, i);
- }
+ expunge_entry(file_ptr, i);
+ }
}
- for ( i = (virt_num_data_entries / 2) - 1;
- i >= (virt_num_data_entries / 4);
- i-- )
- {
- pin_entry(file_ptr, i, TRUE, FALSE);
+ for (i = (virt_num_data_entries / 2) - 1; i >= (virt_num_data_entries / 4); i--) {
+ pin_entry(file_ptr, i, TRUE, FALSE);
- if ( i % 2 == 0 )
- {
- if ( i % 8 <= 4 ) {
+ if (i % 2 == 0) {
+ if (i % 8 <= 4) {
- resize_entry(i, data[i].len / 2);
- }
+ resize_entry(i, data[i].len / 2);
+ }
mark_entry_dirty(i);
- if ( i % 8 <= 4 ) {
+ if (i % 8 <= 4) {
- resize_entry(i, data[i].len);
- }
- }
+ resize_entry(i, data[i].len);
+ }
+ }
- unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
}
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6725,10 +6151,9 @@ smoke_check_5(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -6736,22 +6161,21 @@ smoke_check_5(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -6759,26 +6183,25 @@ smoke_check_5(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
+ if (world_mpi_rank == 0) {
- PASSED();
+ if (max_nerrors == 0) {
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_5() */
-
/*****************************************************************************
*
* Function: trace_file_check()
@@ -6821,87 +6244,80 @@ trace_file_check(int metadata_write_strategy)
{
hbool_t success = TRUE;
- const char *((* expected_output)[]) = NULL;
- const char * expected_output_0[] =
- {
- "### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_protect",
- "H5AC_mark_entry_dirty",
- "H5AC_unprotect",
- "H5AC_protect",
- "H5AC_pin_protected_entry",
- "H5AC_unprotect",
- "H5AC_unpin_entry",
- "H5AC_expunge_entry",
- "H5AC_protect",
- "H5AC_pin_protected_entry",
- "H5AC_unprotect",
- "H5AC_mark_entry_dirty",
- "H5AC_resize_entry",
- "H5AC_resize_entry",
- "H5AC_unpin_entry",
- "H5AC_move_entry",
- "H5AC_move_entry",
- "H5AC_flush",
- "H5AC_flush",
- NULL
- };
- const char * expected_output_1[] =
- {
- "### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_insert_entry",
- "H5AC_protect",
- "H5AC_mark_entry_dirty",
- "H5AC_unprotect",
- "H5AC_protect",
- "H5AC_pin_protected_entry",
- "H5AC_unprotect",
- "H5AC_unpin_entry",
- "H5AC_expunge_entry",
- "H5AC_protect",
- "H5AC_pin_protected_entry",
- "H5AC_unprotect",
- "H5AC_mark_entry_dirty",
- "H5AC_resize_entry",
- "H5AC_resize_entry",
- "H5AC_unpin_entry",
- "H5AC_move_entry",
- "H5AC_move_entry",
- "H5AC_flush",
- "H5AC_flush",
- NULL
- };
- char buffer[256];
- char trace_file_name[64];
- hbool_t done = FALSE;
- int i;
- int max_nerrors;
- size_t expected_line_len;
- size_t actual_line_len;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
- FILE * trace_file_ptr = NULL;
+ const char *((*expected_output)[]) = NULL;
+ const char * expected_output_0[] = {"### HDF5 metadata cache trace file version 1 ###\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
+ NULL};
+ const char * expected_output_1[] = {"### HDF5 metadata cache trace file version 1 ###\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
+ NULL};
+ char buffer[256];
+ char trace_file_name[64];
+ hbool_t done = FALSE;
+ int i;
+ int max_nerrors;
+ size_t expected_line_len;
+ size_t actual_line_len;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ FILE * trace_file_ptr = NULL;
H5AC_cache_config_t config;
- struct mssg_t mssg;
-
+ struct mssg_t mssg;
- switch(metadata_write_strategy) {
+ switch (metadata_write_strategy) {
case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
expected_output = &expected_output_0;
- if(world_mpi_rank == 0)
+ if (world_mpi_rank == 0)
TESTING("trace file collection -- process 0 only md write strategy");
break;
@@ -6909,7 +6325,7 @@ trace_file_check(int metadata_write_strategy)
expected_output = &expected_output_1;
- if(world_mpi_rank == 0)
+ if (world_mpi_rank == 0)
TESTING("trace file collection -- distributed md write strategy");
break;
@@ -6920,53 +6336,54 @@ trace_file_check(int metadata_write_strategy)
*/
expected_output = &expected_output_0;
- if(world_mpi_rank == 0)
+ if (world_mpi_rank == 0)
TESTING("trace file collection -- unknown md write strategy");
break;
} /* end switch */
-
nerrors = 0;
init_data();
reset_stats();
- if(world_mpi_rank == world_server_mpi_rank) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if(!server_main()) {
+ if (!server_main()) {
/* some error occured in the server -- report failure */
nerrors++;
- if ( verbose )
+ if (verbose)
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
else {
/* run the clients */
- if(!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
- if(nerrors == 0) {
+ if (nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
- HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank,
+ FUNC);
}
else {
config.open_trace_file = TRUE;
strcpy(config.trace_file_name, "t_cache_trace.txt");
- if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
- HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
+ FUNC);
}
}
} /* end if */
@@ -6998,36 +6415,38 @@ trace_file_check(int metadata_write_strategy)
move_entry(file_ptr, 0, 20);
move_entry(file_ptr, 0, 20);
- if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
- if(nerrors == 0) {
+ if (nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
- HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank,
+ FUNC);
}
else {
- config.open_trace_file = FALSE;
- config.close_trace_file = TRUE;
+ config.open_trace_file = FALSE;
+ config.close_trace_file = TRUE;
config.trace_file_name[0] = '\0';
- if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
nerrors++;
- HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank,
+ FUNC);
}
}
} /* end if */
- if(fid >= 0) {
- if(!take_down_cache(fid, cache_ptr)) {
+ if (fid >= 0) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
} /* end if */
@@ -7036,7 +6455,7 @@ trace_file_check(int metadata_write_strategy)
* and are clean.
*/
- for(i = 0; i < NUM_DATA_ENTRIES; i++) {
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
HDassert(data_index[i] == i);
HDassert(!(data[i].dirty));
}
@@ -7046,80 +6465,88 @@ trace_file_check(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if(success) {
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if(!success) {
+ if (!success) {
nerrors++;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
} /* end if */
- if(nerrors == 0) {
+ if (nerrors == 0) {
HDsprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank);
- if((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
+ if ((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL) {
nerrors++;
- if(verbose)
+ if (verbose)
HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, FUNC);
}
} /* end if */
-
i = 0;
- while((nerrors == 0) && (!done)) {
+ while ((nerrors == 0) && (!done)) {
/* Get lines of actual and expected data */
- if((*expected_output)[i] == NULL)
+ if ((*expected_output)[i] == NULL)
expected_line_len = (size_t)0;
else
expected_line_len = HDstrlen((*expected_output)[i]);
- if(HDfgets(buffer, 255, trace_file_ptr) != NULL)
+ if (HDfgets(buffer, 255, trace_file_ptr) != NULL)
actual_line_len = HDstrlen(buffer);
else
actual_line_len = (size_t)0;
/* Compare the lines */
/* Handle running out of data */
- if((actual_line_len == 0) || (expected_line_len == 0)) {
- if((actual_line_len == 0) && (expected_line_len == 0)) {
+ if ((actual_line_len == 0) || (expected_line_len == 0)) {
+ if ((actual_line_len == 0) && (expected_line_len == 0)) {
/* Both ran out at the same time - we're done */
done = TRUE;
}
else {
/* One ran out before the other - BADNESS */
nerrors++;
- if(verbose) {
- HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
- if(expected_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC, "<EMPTY>", expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
+ FUNC, i);
+ if (expected_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ "<EMPTY>", expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
+ actual_line_len);
}
- if(actual_line_len == 0) {
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, "<EMPTY>", actual_line_len);
+ if (actual_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ "<EMPTY>", actual_line_len);
}
}
HDfprintf(stdout, "BADNESS BADNESS BADNESS\n");
}
}
/* We directly compare the header line (line 0) */
- else if(0 == i) {
- if((actual_line_len != expected_line_len) || (HDstrcmp(buffer, (*expected_output)[i]) != 0 )) {
+ else if (0 == i) {
+ if ((actual_line_len != expected_line_len) ||
+ (HDstrcmp(buffer, (*expected_output)[i]) != 0)) {
nerrors++;
- if(verbose) {
- HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
+ FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, FUNC,
+ (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, FUNC, buffer,
+ actual_line_len);
}
}
}
@@ -7127,16 +6554,18 @@ trace_file_check(int metadata_write_strategy)
* keeps the test from being too fragile.
*/
else {
- char *tok = NULL; /* token for actual line */
+ char *tok = NULL; /* token for actual line */
tok = HDstrtok(buffer, " ");
- if(HDstrcmp(tok, (*expected_output)[i]) != 0 ) {
+ if (HDstrcmp(tok, (*expected_output)[i]) != 0) {
nerrors++;
- if(verbose) {
- HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, FUNC, (*expected_output)[i]);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank,
+ FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, FUNC,
+ (*expected_output)[i]);
HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, FUNC, tok);
}
}
@@ -7146,7 +6575,7 @@ trace_file_check(int metadata_write_strategy)
} /* end while */
/* Clean up the trace file */
- if(trace_file_ptr != NULL) {
+ if (trace_file_ptr != NULL) {
HDfclose(trace_file_ptr);
trace_file_ptr = NULL;
HDremove(trace_file_name);
@@ -7155,9 +6584,9 @@ trace_file_check(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if(world_mpi_rank == 0) {
+ if (world_mpi_rank == 0) {
- if(max_nerrors == 0) {
+ if (max_nerrors == 0) {
PASSED();
}
else {
@@ -7168,11 +6597,10 @@ trace_file_check(int metadata_write_strategy)
success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* trace_file_check() */
-
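
For orientation: trace_file_check() above drives the trace-file machinery through the internal H5AC_get_cache_auto_resize_config()/H5AC_set_cache_auto_resize_config() test helpers. A minimal sketch of the equivalent sequence through the public API is given below; `fid` is assumed to be a valid file identifier, the helper name is illustrative, and the trace-file path is supplied by the caller.

#include <string.h>
#include "hdf5.h"

/* Sketch only: enable or disable the metadata cache trace file for an open
 * file via the public H5Fget_mdc_config()/H5Fset_mdc_config() calls, which
 * mirror the internal helpers used by trace_file_check().  The caller must
 * ensure the path fits in trace_file_name[].
 */
static herr_t
toggle_mdc_trace(hid_t fid, const char *trace_path, hbool_t enable)
{
    H5AC_cache_config_t config;

    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if (H5Fget_mdc_config(fid, &config) < 0) /* start from the current settings */
        return -1;

    if (enable) {
        config.open_trace_file = 1;
        strcpy(config.trace_file_name, trace_path);
    }
    else {
        config.open_trace_file    = 0;
        config.close_trace_file   = 1;
        config.trace_file_name[0] = '\0';
    }

    return H5Fset_mdc_config(fid, &config);
}
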
/*****************************************************************************
*
* Function: smoke_check_6()
@@ -7189,48 +6617,47 @@ trace_file_check(int metadata_write_strategy)
static hbool_t
smoke_check_6(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
struct mssg_t mssg;
- switch ( metadata_write_strategy ) {
+ switch (metadata_write_strategy) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- process 0 only md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #6 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- distributed md write strategy");
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #6 -- distributed md write strategy");
}
- break;
+ break;
default:
- if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- unknown md write strategy");
+ if (world_mpi_rank == 0) {
+ TESTING("smoke check #6 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if (world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if (!server_main()) {
/* some error occured in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -7238,128 +6665,119 @@ smoke_check_6(int metadata_write_strategy)
{
int temp;
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) {
nerrors++;
- fid = -1;
+ fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
}
- temp = virt_num_data_entries;
+ temp = virt_num_data_entries;
virt_num_data_entries = NUM_DATA_ENTRIES;
/* insert the first half collectively */
H5CX_set_coll_metadata_read(TRUE);
- for ( i = 0; i < virt_num_data_entries/2; i++ )
- {
- struct datum * entry_ptr;
+ for (i = 0; i < virt_num_data_entries / 2; i++) {
+ struct datum *entry_ptr;
entry_ptr = &(data[i]);
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if(TRUE != entry_ptr->header.coll_access) {
+ if (TRUE != entry_ptr->header.coll_access) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Entry inserted not marked as collective.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Entry inserted not marked as collective.\n", world_mpi_rank,
+ FUNC);
}
}
/* Make sure coll entries do not cross the 80% threshold */
H5_CHECK_OVERFLOW(cache_ptr->max_cache_size, size_t, double);
- HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
/* insert the other half independently */
H5CX_set_coll_metadata_read(FALSE);
- for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
- {
- struct datum * entry_ptr;
+ for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
+ struct datum *entry_ptr;
entry_ptr = &(data[i]);
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if(FALSE != entry_ptr->header.coll_access) {
+ if (FALSE != entry_ptr->header.coll_access) {
nerrors++;
- if ( verbose ) {
+ if (verbose) {
HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
world_mpi_rank, FUNC);
}
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
/* flush the file */
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
}
/* Protect the first half of the entries collectively */
H5CX_set_coll_metadata_read(TRUE);
- for ( i = 0; i < (virt_num_data_entries / 2); i++ )
- {
- struct datum * entry_ptr;
+ for (i = 0; i < (virt_num_data_entries / 2); i++) {
+ struct datum *entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
- if(TRUE != entry_ptr->header.coll_access) {
+ if (TRUE != entry_ptr->header.coll_access) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Entry protected not marked as collective.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: Entry protected not marked as collective.\n", world_mpi_rank,
+ FUNC);
}
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
/* protect the other half independently */
H5CX_set_coll_metadata_read(FALSE);
- for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
- {
- struct datum * entry_ptr;
+ for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
+ struct datum *entry_ptr;
entry_ptr = &(data[i]);
lock_entry(file_ptr, i);
- if(FALSE != entry_ptr->header.coll_access) {
+ if (FALSE != entry_ptr->header.coll_access) {
nerrors++;
- if ( verbose ) {
+ if (verbose) {
HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
world_mpi_rank, FUNC);
}
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
- for ( i = 0; i < (virt_num_data_entries); i++ )
- {
+ for (i = 0; i < (virt_num_data_entries); i++) {
unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
}
- if ( fid >= 0 ) {
+ if (fid >= 0) {
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if (!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
}
}
@@ -7368,10 +6786,9 @@ smoke_check_6(int metadata_write_strategy)
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for (i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -7379,23 +6796,21 @@ smoke_check_6(int metadata_write_strategy)
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
- mssg.base_addr = 0; /* not used */
- mssg.len = 0; /* not used */
- mssg.ver = 0; /* not used */
- mssg.count = 0; /* not used */
+ mssg.base_addr = 0; /* not used */
+ mssg.len = 0; /* not used */
+ mssg.ver = 0; /* not used */
+ mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
-
+ if (success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
+ if (!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
}
}
@@ -7404,26 +6819,25 @@ smoke_check_6(int metadata_write_strategy)
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
+ if (world_mpi_rank == 0) {
- PASSED();
+ if (max_nerrors == 0) {
- } else {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
+ success = ((success) && (max_nerrors == 0));
- return(success);
+ return (success);
} /* smoke_check_6() */
-
/*****************************************************************************
*
* Function: main()
@@ -7440,62 +6854,62 @@ smoke_check_6(int metadata_write_strategy)
int
main(int argc, char **argv)
{
- int express_test;
+ int express_test;
unsigned u;
- int mpi_size;
- int mpi_rank;
- int max_nerrors;
+ int mpi_size;
+ int mpi_rank;
+ int max_nerrors;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- world_mpi_size = mpi_size;
- world_mpi_rank = mpi_rank;
+ world_mpi_size = mpi_size;
+ world_mpi_rank = mpi_rank;
world_server_mpi_rank = mpi_size - 1;
- world_mpi_comm = MPI_COMM_WORLD;
+ world_mpi_comm = MPI_COMM_WORLD;
/* Attempt to turn off atexit post processing so that in case errors
* happen during the test and the process is aborted, it will not get
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0){
- HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
- mpi_rank);
+ if (H5dont_atexit() < 0) {
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
express_test = do_express_test();
-#if 0 /* JRM */
+#if 0 /* JRM */
express_test = 0;
#endif /* JRM */
- if ( express_test ) {
-
- virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
+ if (express_test) {
- } else {
+ virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
+ }
+ else {
- virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
+ virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
}
#ifdef H5_HAVE_MPE
- if ( MAINPROCESS ) { HDprintf(" Tests compiled for MPE.\n"); }
+ if (MAINPROCESS) {
+ HDprintf(" Tests compiled for MPE.\n");
+ }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
-
- if (MAINPROCESS){
- HDprintf("===================================\n");
- HDprintf("Parallel metadata cache tests\n");
- HDprintf(" mpi_size = %d\n", mpi_size);
- HDprintf(" express_test = %d\n", express_test);
- HDprintf("===================================\n");
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("Parallel metadata cache tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf(" express_test = %d\n", express_test);
+ HDprintf("===================================\n");
}
- if ( mpi_size < 3 ) {
+ if (mpi_size < 3) {
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDprintf(" Need at least 3 processes. Exiting.\n");
}
@@ -7514,65 +6928,57 @@ main(int argc, char **argv)
*/
/* setup file access property list with the world communicator */
- if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
+ if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, FUNC);
}
}
- if ( H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0 ) {
+ if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, FUNC);
}
}
/* fix the file names */
- for ( u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u )
- {
- if ( h5_fixname(FILENAME[u], fapl, filenames[u],
- sizeof(filenames[u])) == NULL ) {
+ for (u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u) {
+ if (h5_fixname(FILENAME[u], fapl, filenames[u], sizeof(filenames[u])) == NULL) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, FUNC);
}
break;
}
}
/* close the fapl before we set it up again */
- if ( H5Pclose(fapl) < 0 ) {
+ if (H5Pclose(fapl) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, FUNC);
}
}
/* now create the fapl again, excluding the server process. */
- if ( world_mpi_rank != world_server_mpi_rank ) {
+ if (world_mpi_rank != world_server_mpi_rank) {
/* setup file access property list */
- if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
- world_mpi_rank, FUNC);
+ if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
+ nerrors++;
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, FUNC);
}
}
- if ( H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0 ) {
+ if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, FUNC);
}
}
}
@@ -7581,15 +6987,15 @@ main(int argc, char **argv)
max_nerrors = get_max_nerrors();
- if ( max_nerrors != 0 ) {
+ if (max_nerrors != 0) {
/* errors in setup -- no point in continuing */
- if ( world_mpi_rank == 0 ) {
+ if (world_mpi_rank == 0) {
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
}
- goto finish;
+ goto finish;
}
/* run the tests */
@@ -7617,13 +7023,12 @@ main(int argc, char **argv)
smoke_check_5(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
#endif
/* enable the collective metadata read property */
- if ( world_mpi_rank != world_server_mpi_rank ) {
- if ( H5Pset_all_coll_metadata_ops(fapl, TRUE) < 0 ) {
+ if (world_mpi_rank != world_server_mpi_rank) {
+ if (H5Pset_all_coll_metadata_ops(fapl, TRUE) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pset_all_coll_metadata_ops() failed 1.\n",
- world_mpi_rank, FUNC);
+ if (verbose) {
+ HDfprintf(stdout, "%d:%s: H5Pset_all_coll_metadata_ops() failed 1.\n", world_mpi_rank, FUNC);
}
}
}
@@ -7642,16 +7047,15 @@ finish:
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
- if (MAINPROCESS){ /* only process 0 reports */
- HDprintf("===================================\n");
- if (failures){
- HDprintf("***metadata cache tests detected %d failures***\n",
- failures);
- }
- else{
- HDprintf("metadata cache tests finished with no failures\n");
- }
- HDprintf("===================================\n");
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (failures) {
+ HDprintf("***metadata cache tests detected %d failures***\n", failures);
+ }
+ else {
+ HDprintf("metadata cache tests finished with no failures\n");
+ }
+ HDprintf("===================================\n");
}
takedown_derived_types();
@@ -7663,6 +7067,5 @@ finish:
MPI_Finalize();
/* cannot just return (failures) because exit code is limited to 1byte */
- return(failures != 0);
+ return (failures != 0);
}
-
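
The main() routine above follows the standard setup pattern for these parallel tests: create a file-access property list, attach the MPI communicator with H5Pset_fapl_mpio(), and (before the final smoke checks) enable collective metadata operations. A stripped-down sketch of that pattern, assuming MPI is already initialized and using an illustrative function and file name, is:

#include "hdf5.h"
#include "mpi.h"

/* Sketch only: create a file with an MPI-IO file access property list, as
 * main() in t_cache.c does.  Assumes MPI_Init() has already been called.
 */
static hid_t
create_parallel_file(MPI_Comm comm, const char *name)
{
    hid_t fapl = -1;
    hid_t fid  = -1;

    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return -1;

    /* bind the file access list to the communicator */
    if (H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL) < 0)
        goto done;

    /* request collective metadata operations, as the later smoke checks do */
    if (H5Pset_all_coll_metadata_ops(fapl, 1) < 0)
        goto done;

    fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

done:
    H5Pclose(fapl);
    return fid;
}

As in main(), the test creates the property list once for the world communicator during setup and then recreates it for the client-only communicator before the smoke checks run.
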
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 6877d63..776da2e 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -27,18 +27,12 @@
#define DSET_SIZE (40 * CHUNK_SIZE)
#define MAX_NUM_DSETS 256
#define PAR_NUM_DSETS 32
-#define PAGE_SIZE (4 * 1024)
+#define PAGE_SIZE (4 * 1024)
#define PB_SIZE (64 * PAGE_SIZE)
/* global variable declarations: */
-
-const char *FILENAMES[] = {
- "t_cache_image_00",
- "t_cache_image_01",
- "t_cache_image_02",
- NULL
-};
+const char *FILENAMES[] = {"t_cache_image_00", "t_cache_image_01", "t_cache_image_02", NULL};
/* local utility function declarations */
@@ -47,49 +41,34 @@ static void create_data_sets(hid_t file_id, int min_dset, int max_dset);
static void delete_data_sets(hid_t file_id, int min_dset, int max_dset);
#endif
-static void open_hdf5_file(const hbool_t create_file,
- const hbool_t mdci_sbem_expected,
- const hbool_t read_only,
- const hbool_t set_mdci_fapl,
- const hbool_t config_fsm,
- const hbool_t enable_page_buffer,
- const char * hdf_file_name,
- const unsigned cache_image_flags,
- hid_t * file_id_ptr,
- H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
- int l_facc_type,
- const hbool_t all_coll_metadata_ops,
- const hbool_t coll_metadata_write,
- const int md_write_strat);
+static void open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected,
+ const hbool_t read_only, const hbool_t set_mdci_fapl, const hbool_t config_fsm,
+ const hbool_t enable_page_buffer, const char *hdf_file_name,
+ const unsigned cache_image_flags, hid_t *file_id_ptr, H5F_t **file_ptr_ptr,
+ H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type,
+ const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
+ const int md_write_strat);
static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
/* local test function declarations */
-static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
- hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display);
-static void usage(void);
+static hbool_t parse_flags(int argc, char *argv[], hbool_t *setup_ptr, hbool_t *ici_ptr, int *file_idx_ptr,
+ int *mpi_size_ptr, hbool_t display);
+static void usage(void);
static unsigned construct_test_file(int test_file_index);
-static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
- int mpi_size);
-static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
-static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
+static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size);
+static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
+static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
-static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
+static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
/* top level test function declarations */
-static unsigned verify_cache_image_RO(int file_name_id,
- int md_write_strat, int mpi_rank);
-static unsigned verify_cache_image_RW(int file_name_id,
- int md_write_strat, int mpi_rank);
-
-static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
- int mpi_rank, int mpi_size);
+static unsigned verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank);
+static unsigned verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank);
+static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size);
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -160,42 +139,38 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
static unsigned
construct_test_file(int test_file_index)
{
- const char * fcn_name = "construct_test_file()";
- char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
- H5F_t *file_ptr = NULL;
- H5C_t *cache_ptr = NULL;
- int cp = 0;
- int min_dset = 0;
- int max_dset = 0;
- MPI_Comm dummy_comm = MPI_COMM_WORLD;
- MPI_Info dummy_info = MPI_INFO_NULL;
+ const char *fcn_name = "construct_test_file()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ int cp = 0;
+ int min_dset = 0;
+ int max_dset = 0;
+ MPI_Comm dummy_comm = MPI_COMM_WORLD;
+ MPI_Info dummy_info = MPI_INFO_NULL;
pass = TRUE;
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* setup the file name */
- if ( pass ) {
+ if (pass) {
HDassert(FILENAMES[test_file_index]);
- if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
- filename, sizeof(filename))
- == NULL ) {
+ if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_fixname() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 1) Create a HDF5 file with the cache image FAPL entry.
*
* Verify that the cache is informed of the cache image FAPL entry.
@@ -203,13 +178,13 @@ construct_test_file(int test_file_index)
* Set flags forcing full function of the cache image feature.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ TRUE,
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ TRUE,
+ /* config_fsm */ TRUE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -224,50 +199,45 @@ construct_test_file(int test_file_index)
/* md_write_strat */ 0);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 2) Create a data set in the file. */
- if ( pass ) {
+ if (pass) {
create_data_sets(file_id, min_dset++, max_dset++);
}
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 0 ) {
+ if (cache_ptr->images_loaded != 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "metadata cache image block loaded(1).";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 3) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
- while ( ( pass ) && ( max_dset < MAX_NUM_DSETS ) )
- {
+ while ((pass) && (max_dset < MAX_NUM_DSETS)) {
/* 4) Open the file.
*
@@ -275,13 +245,13 @@ construct_test_file(int test_file_index)
* metadata cache image.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -296,66 +266,59 @@ construct_test_file(int test_file_index)
/* md_write_strat */ 0);
}
- if ( show_progress )
- HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n",
- fcn_name, cp, max_dset, pass);
-
+ if (show_progress)
+ HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp, max_dset, pass);
/* 5) Create a data set in the file. */
- if ( pass ) {
+ if (pass) {
create_data_sets(file_id, min_dset++, max_dset++);
}
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded == 0 ) {
+ if (cache_ptr->images_loaded == 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "metadata cache image block not loaded(1).";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
- HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n",
- fcn_name, cp + 1, max_dset, pass);
-
+ if (show_progress)
+ HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp + 1, max_dset, pass);
/* 6) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
- HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n",
- fcn_name, cp + 2, max_dset, pass);
+ if (show_progress)
+ HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp + 2, max_dset, pass);
} /* end while */
cp += 3;
-
/* 7) Open the file R/O.
*
* Verify that the file contains a metadata cache image
* superblock extension message.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -370,41 +333,40 @@ construct_test_file(int test_file_index)
/* md_write_strat */ 0);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 8) Open and close all data sets.
*
* Verify that the cache image has been loaded.
*/
- if ( pass ) {
+ if (pass) {
- verify_data_sets(file_id, 0, max_dset - 1);
+ verify_data_sets(file_id, 0, max_dset - 1);
}
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded == 0 ) {
+ if (cache_ptr->images_loaded == 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "metadata cache image block not loaded(2).";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 9) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -413,7 +375,6 @@ construct_test_file(int test_file_index)
} /* construct_test_file() */
-
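
construct_test_file() exercises the cache-image feature through the test harness's open_hdf5_file() helper with set_mdci_fapl enabled. For orientation, a minimal sketch of requesting the same feature through the public API (in HDF5 releases that provide H5Pset_mdc_image_config()) is shown below; the function name is illustrative and error handling is abbreviated.

#include "hdf5.h"

/* Sketch only: ask for a metadata cache image to be written on file close.
 * This is the public-API counterpart of what open_hdf5_file() does when
 * set_mdci_fapl is TRUE.
 */
static hid_t
fapl_with_cache_image(void)
{
    H5AC_cache_image_config_t config;
    hid_t                     fapl = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl < 0)
        return -1;

    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = 1; /* write a cache image on close */
    config.save_resize_status = 0;
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if (H5Pset_mdc_image_config(fapl, &config) < 0) {
        H5Pclose(fapl);
        return -1;
    }

    return fapl;
}
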
/*-------------------------------------------------------------------------
* Function: create_data_sets()
*
@@ -446,26 +407,27 @@ construct_test_file(int test_file_index)
static void
create_data_sets(hid_t file_id, int min_dset, int max_dset)
{
- const char * fcn_name = "create_data_sets()";
- char dset_name[64];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
- int cp = 0;
- int i, j, k, l, m;
- int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
- herr_t status;
- hid_t dataspace_id = -1;
- hid_t filespace_ids[MAX_NUM_DSETS];
- hid_t memspace_id = -1;
- hid_t dataset_ids[MAX_NUM_DSETS];
- hid_t properties = -1;
- hsize_t dims[2];
- hsize_t a_size[2];
- hsize_t offset[2];
- hsize_t chunk_size[2];
-
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ const char *fcn_name = "create_data_sets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t dataspace_id = -1;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hid_t properties = -1;
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+ hsize_t chunk_size[2];
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
HDassert(0 <= min_dset);
HDassert(min_dset <= max_dset);
@@ -473,20 +435,19 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the datasets */
- if ( pass ) {
+ if (pass) {
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
+ while ((pass) && (i <= max_dset)) {
/* create a dataspace for the chunked dataset */
- dims[0] = DSET_SIZE;
- dims[1] = DSET_SIZE;
+ dims[0] = DSET_SIZE;
+ dims[1] = DSET_SIZE;
dataspace_id = H5Screate_simple(2, dims, NULL);
- if ( dataspace_id < 0 ) {
+ if (dataspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
@@ -494,51 +455,50 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
* to be partioned into 10X10 element chunks.
*/
- if ( pass ) {
+ if (pass) {
chunk_size[0] = CHUNK_SIZE;
chunk_size[1] = CHUNK_SIZE;
- properties = H5Pcreate(H5P_DATASET_CREATE);
+ properties = H5Pcreate(H5P_DATASET_CREATE);
- if ( properties < 0 ) {
+ if (properties < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pcreate() failed.";
}
}
- if ( pass ) {
+ if (pass) {
- if ( H5Pset_chunk(properties, 2, chunk_size) < 0 ) {
+ if (H5Pset_chunk(properties, 2, chunk_size) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_chunk() failed.";
}
}
/* create the dataset */
- if ( pass ) {
+ if (pass) {
HDsprintf(dset_name, "/dset%03d", i);
- dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
- dataspace_id, H5P_DEFAULT,
+ dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
- if ( dataset_ids[i] < 0 ) {
+ if (dataset_ids[i] < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dcreate() failed.";
}
}
/* get the file space ID */
- if ( pass ) {
+ if (pass) {
filespace_ids[i] = H5Dget_space(dataset_ids[i]);
- if ( filespace_ids[i] < 0 ) {
+ if (filespace_ids[i] < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -547,84 +507,79 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* create the mem space to be used to read and write chunks */
- if ( pass ) {
+ if (pass) {
- dims[0] = CHUNK_SIZE;
- dims[1] = CHUNK_SIZE;
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
memspace_id = H5Screate_simple(2, dims, NULL);
- if ( memspace_id < 0 ) {
+ if (memspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* select in memory hyperslab */
- if ( pass ) {
+ if (pass) {
- offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[0] = 0; /*offset of hyperslab in memory*/
offset[1] = 0;
- a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
a_size[1] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
- a_size, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* initialize all datasets on a round robin basis */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
m = min_dset;
- while ( ( pass ) && ( m <= max_dset ) )
- {
+ while ((pass) && (m <= max_dset)) {
/* initialize the slab */
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) +
- (DSET_SIZE * (i + k)) + j + l;
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l;
}
}
/* select on disk hyperslab */
offset[0] = (hsize_t)i; /*offset of hyperslab in file*/
offset[1] = (hsize_t)j;
- a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
a_size[1] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
/* write the chunk to file */
- status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
- filespace_ids[m], H5P_DEFAULT, data_chunk);
+ status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m], H5P_DEFAULT,
+ data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dwrite() failed.";
}
m++;
@@ -635,88 +590,75 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
i += CHUNK_SIZE;
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* read data from data sets and validate it */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
m = min_dset;
- while ( ( pass ) && ( m <= max_dset ) )
- {
+ while ((pass) && (m <= max_dset)) {
/* select on disk hyperslab */
offset[0] = (hsize_t)i; /* offset of hyperslab in file */
offset[1] = (hsize_t)j;
a_size[0] = CHUNK_SIZE; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
/* read the chunk from file */
- if ( pass ) {
+ if (pass) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
}
/* validate the slab */
- if ( pass ) {
+ if (pass) {
valid_chunk = TRUE;
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- if ( data_chunk[k][l]
- !=
- ((DSET_SIZE * DSET_SIZE * m) +
- (DSET_SIZE * (i + k)) + j + l) ) {
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ if (data_chunk[k][l] !=
+ ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) {
valid_chunk = FALSE;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
- k, l, data_chunk[k][l],
- ((DSET_SIZE * DSET_SIZE * m) +
- (DSET_SIZE * (i + k)) + j + l));
- HDfprintf(stdout,
- "m = %d, i = %d, j = %d, k = %d, l = %d\n",
- m, i, j, k, l);
- }
+ HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l,
+ data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout, "m = %d, i = %d, j = %d, k = %d, l = %d\n", m, i, j, k,
+ l);
+ }
}
}
}
- if ( ! valid_chunk ) {
+ if (!valid_chunk) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
- i, j, m);
- }
+ HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m);
+ }
}
}
m++;
@@ -726,39 +668,37 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
i += CHUNK_SIZE;
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* close the file spaces */
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
- if ( H5Sclose(filespace_ids[i]) < 0 ) {
+ while ((pass) && (i <= max_dset)) {
+ if (H5Sclose(filespace_ids[i]) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose() failed.";
}
i++;
}
-
/* close the datasets */
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
- if ( H5Dclose(dataset_ids[i]) < 0 ) {
+ while ((pass) && (i <= max_dset)) {
+ if (H5Dclose(dataset_ids[i]) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dclose() failed.";
}
i++;
}
/* close the mem space */
- if ( pass ) {
+ if (pass) {
- if ( H5Sclose(memspace_id) < 0 ) {
+ if (H5Sclose(memspace_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
}
@@ -767,7 +707,6 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* create_data_sets() */
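
For reference, the chunked create/write pattern that create_data_sets() exercises above reduces to the standalone sketch below. This is an illustration only: the file name, dataset name, and sizes are made up, and error checking is omitted for brevity.

    #include "hdf5.h"

    /* Minimal sketch (illustrative names/sizes): create a chunked 2-D
     * dataset and write one 10x10 chunk through matching memory/file
     * hyperslab selections.
     */
    #define EX_DSET_SIZE 40
    #define EX_CHUNK     10

    int
    main(void)
    {
        hsize_t dims[2]  = {EX_DSET_SIZE, EX_DSET_SIZE};
        hsize_t chunk[2] = {EX_CHUNK, EX_CHUNK};
        hsize_t start[2] = {0, 0}; /* which chunk to write */
        hsize_t count[2] = {EX_CHUNK, EX_CHUNK};
        int     buf[EX_CHUNK][EX_CHUNK];
        int     k, l;

        for (k = 0; k < EX_CHUNK; k++)
            for (l = 0; l < EX_CHUNK; l++)
                buf[k][l] = k * EX_CHUNK + l;

        hid_t file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t fspace  = H5Screate_simple(2, dims, NULL);
        hid_t dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl_id, 2, chunk);
        hid_t dset_id = H5Dcreate2(file_id, "/dset000", H5T_STD_I32BE, fspace, H5P_DEFAULT, dcpl_id,
                                   H5P_DEFAULT);

        /* the memory space describes one chunk; the file selection picks the target chunk */
        hid_t mspace    = H5Screate_simple(2, count, NULL);
        hid_t filespace = H5Dget_space(dset_id);
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
        H5Dwrite(dset_id, H5T_NATIVE_INT, mspace, filespace, H5P_DEFAULT, buf);

        H5Sclose(mspace);
        H5Sclose(filespace);
        H5Sclose(fspace);
        H5Pclose(dcpl_id);
        H5Dclose(dset_id);
        H5Fclose(file_id);
        return 0;
    }
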
-
/*-------------------------------------------------------------------------
* Function: delete_data_sets()
*
@@ -849,7 +788,6 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* delete_data_sets() */
#endif
-
/*-------------------------------------------------------------------------
* Function: open_hdf5_file()
*
@@ -901,277 +839,248 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
*/
static void
-open_hdf5_file(const hbool_t create_file,
- const hbool_t mdci_sbem_expected,
- const hbool_t read_only,
- const hbool_t set_mdci_fapl,
- const hbool_t config_fsm,
- const hbool_t enable_page_buffer,
- const char * hdf_file_name,
- const unsigned cache_image_flags,
- hid_t * file_id_ptr,
- H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
- int l_facc_type,
- const hbool_t all_coll_metadata_ops,
- const hbool_t coll_metadata_write,
+open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, const hbool_t read_only,
+ const hbool_t set_mdci_fapl, const hbool_t config_fsm, const hbool_t enable_page_buffer,
+ const char *hdf_file_name, const unsigned cache_image_flags, hid_t *file_id_ptr,
+ H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type,
+ const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
const int md_write_strat)
{
- const char * fcn_name = "open_hdf5_file()";
- hbool_t show_progress = FALSE;
- hbool_t verbose = FALSE;
- int cp = 0;
- hid_t fapl_id = -1;
- hid_t fcpl_id = -1;
- hid_t file_id = -1;
- herr_t result;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
- H5C_cache_image_ctl_t image_ctl;
- H5AC_cache_image_config_t cache_image_config = {
- H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
- TRUE,
- FALSE,
- H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+ const char * fcn_name = "open_hdf5_file()";
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ hid_t fapl_id = -1;
+ hid_t fcpl_id = -1;
+ hid_t file_id = -1;
+ herr_t result;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ H5C_cache_image_ctl_t image_ctl;
+ H5AC_cache_image_config_t cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE,
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
HDassert(!create_file || config_fsm);
- if ( pass )
- {
- /* opening the file both read only and with a cache image
+ if (pass) {
+ /* opening the file both read only and with a cache image
* requested is a contradiction. We resolve it by ignoring
* the cache image request silently.
*/
- if ( ( create_file && mdci_sbem_expected ) ||
- ( create_file && read_only ) ||
- ( config_fsm && !create_file ) ||
- ( create_file && enable_page_buffer && ! config_fsm ) ||
- ( hdf_file_name == NULL ) ||
- ( ( set_mdci_fapl ) && ( cache_image_flags == 0 ) ) ||
- ( ( set_mdci_fapl ) &&
- ( (cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0 ) ) ||
- ( file_id_ptr == NULL ) ||
- ( file_ptr_ptr == NULL ) ||
- ( cache_ptr_ptr == NULL ) ||
- ( l_facc_type != (l_facc_type & (FACC_MPIO)) ) ) {
-
- failure_mssg =
- "Bad param(s) on entry to open_hdf5_file().\n";
+ if ((create_file && mdci_sbem_expected) || (create_file && read_only) ||
+ (config_fsm && !create_file) || (create_file && enable_page_buffer && !config_fsm) ||
+ (hdf_file_name == NULL) || ((set_mdci_fapl) && (cache_image_flags == 0)) ||
+ ((set_mdci_fapl) && ((cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0)) || (file_id_ptr == NULL) ||
+ (file_ptr_ptr == NULL) || (cache_ptr_ptr == NULL) ||
+ (l_facc_type != (l_facc_type & (FACC_MPIO)))) {
+
+ failure_mssg = "Bad param(s) on entry to open_hdf5_file().\n";
pass = FALSE;
- } else if ( verbose ) {
+ }
+ else if (verbose) {
- HDfprintf(stdout, "%s: HDF file name = \"%s\".\n",
- fcn_name, hdf_file_name);
+ HDfprintf(stdout, "%s: HDF file name = \"%s\".\n", fcn_name, hdf_file_name);
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
    /* create a file access property list. */
- if ( pass ) {
+ if (pass) {
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- if ( fapl_id < 0 ) {
+ if (fapl_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pcreate() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* call H5Pset_libver_bounds() on the fapl_id */
- if ( pass ) {
+ if (pass) {
- if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
- < 0 ) {
+ if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_libver_bounds() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get metadata cache image config -- verify that it is the default */
- if ( pass ) {
+ if (pass) {
result = H5Pget_mdc_image_config(fapl_id, &cache_image_config);
- if ( result < 0 ) {
+ if (result < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pget_mdc_image_config() failed.\n";
}
- if ( ( cache_image_config.version !=
- H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( cache_image_config.generate_image != FALSE ) ||
- ( cache_image_config.save_resize_status != FALSE ) ||
- ( cache_image_config.entry_ageout !=
- H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) {
+ if ((cache_image_config.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
+ (cache_image_config.generate_image != FALSE) ||
+ (cache_image_config.save_resize_status != FALSE) ||
+ (cache_image_config.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unexpected default cache image config.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set metadata cache image fapl entry if indicated */
- if ( ( pass ) && ( set_mdci_fapl ) ) {
+ if ((pass) && (set_mdci_fapl)) {
/* set cache image config fields to taste */
- cache_image_config.generate_image = TRUE;
+ cache_image_config.generate_image = TRUE;
cache_image_config.save_resize_status = FALSE;
- cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
result = H5Pset_mdc_image_config(fapl_id, &cache_image_config);
- if ( result < 0 ) {
+ if (result < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_mdc_image_config() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
    /* setup the persistent free space manager if indicated */
- if ( ( pass ) && ( config_fsm ) ) {
+ if ((pass) && (config_fsm)) {
- fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
- if ( fcpl_id <= 0 ) {
+ if (fcpl_id <= 0) {
- pass = FALSE;
- failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
- }
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
+ }
}
- if ( ( pass ) && ( config_fsm ) ) {
+ if ((pass) && (config_fsm)) {
- if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
- TRUE, (hsize_t)1) == FAIL ) {
- pass = FALSE;
+ if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) == FAIL) {
+ pass = FALSE;
failure_mssg = "H5Pset_file_space_strategy() failed.\n";
}
}
- if ( ( pass ) && ( config_fsm ) ) {
+ if ((pass) && (config_fsm)) {
- if ( H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL ) {
+ if (H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_file_space_page_size() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the page buffer if indicated */
- if ( ( pass ) && ( enable_page_buffer ) ) {
+ if ((pass) && (enable_page_buffer)) {
- if ( H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0 ) {
+ if (H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_page_buffer_size() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
- if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+ if ((pass) && (l_facc_type == FACC_MPIO)) {
/* set Parallel access with communicator */
- if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) {
+ if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_fapl_mpio() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+ if ((pass) && (l_facc_type == FACC_MPIO)) {
if (H5Pset_all_coll_metadata_ops(fapl_id, all_coll_metadata_ops) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_all_coll_metadata_ops() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+ if ((pass) && (l_facc_type == FACC_MPIO)) {
- if ( H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0 ) {
+ if (H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_coll_metadata_write() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+ if ((pass) && (l_facc_type == FACC_MPIO)) {
/* set the desired parallel metadata write strategy */
H5AC_cache_config_t mdc_config;
mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
- if ( H5Pget_mdc_config(fapl_id, &mdc_config) < 0 ) {
+ if (H5Pget_mdc_config(fapl_id, &mdc_config) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pget_mdc_config() failed.\n";
}
mdc_config.metadata_write_strategy = md_write_strat;
- if ( H5Pset_mdc_config(fapl_id, &mdc_config) < 0 ) {
+ if (H5Pset_mdc_config(fapl_id, &mdc_config) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_mdc_config() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* open the file */
- if ( pass ) {
+ if (pass) {
- if ( create_file ) {
+ if (create_file) {
- if ( fcpl_id != -1 )
+ if (fcpl_id != -1)
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
- fcpl_id, fapl_id);
- else
-
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
- H5P_DEFAULT, fapl_id);
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, fcpl_id, fapl_id);
+ else
- } else {
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ }
+ else {
- if ( read_only )
+ if (read_only)
file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id);
@@ -1180,74 +1089,71 @@ open_hdf5_file(const hbool_t create_file,
file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR, fapl_id);
}
- if ( file_id < 0 ) {
+ if (file_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fcreate() or H5Fopen() failed.\n";
-
- } else {
+ }
+ else {
file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE);
- if ( file_ptr == NULL ) {
+ if (file_ptr == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Can't get file_ptr.";
- if ( verbose ) {
+ if (verbose) {
HDfprintf(stdout, "%s: Can't get file_ptr.\n", fcn_name);
}
}
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get a pointer to the files internal data structure and then
* to the cache structure
*/
- if ( pass ) {
+ if (pass) {
- if ( file_ptr->shared->cache == NULL ) {
+ if (file_ptr->shared->cache == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "can't get cache pointer(1).\n";
-
- } else {
+ }
+ else {
cache_ptr = file_ptr->shared->cache;
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* verify expected page buffer status. At present, page buffering
* must be disabled in parallel -- hopefully this will change in the
* future.
*/
- if ( pass ) {
+ if (pass) {
- if ( ( file_ptr->shared->page_buf ) &&
- ( ( ! enable_page_buffer ) || ( l_facc_type == FACC_MPIO ) ) ) {
+ if ((file_ptr->shared->page_buf) && ((!enable_page_buffer) || (l_facc_type == FACC_MPIO))) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "page buffer unexepectedly enabled.";
+ }
+ else if ((file_ptr->shared->page_buf != NULL) &&
+ ((enable_page_buffer) || (l_facc_type != FACC_MPIO))) {
- } else if ( ( file_ptr->shared->page_buf != NULL ) &&
- ( ( enable_page_buffer ) || ( l_facc_type != FACC_MPIO ) ) ) {
-
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "page buffer unexepectedly disabled.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* verify expected metadata cache status */
/* get the cache image control structure from the cache, and verify
@@ -1255,145 +1161,133 @@ open_hdf5_file(const hbool_t create_file,
*
* Then set the flags in this structure to the specified value.
*/
- if ( pass ) {
+ if (pass) {
- if ( H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0 ) {
+ if (H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "error returned by H5C_get_cache_image_config().";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ if (pass) {
- if ( pass ) {
-
- if ( set_mdci_fapl ) {
+ if (set_mdci_fapl) {
- if ( read_only ) {
+ if (read_only) {
- if ( ( image_ctl.version !=
- H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( image_ctl.generate_image != FALSE ) ||
- ( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
- H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
- ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+ if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
+ (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
+ (image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unexpected image_ctl values(1).\n";
}
- } else {
+ }
+ else {
- if ( ( image_ctl.version !=
- H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( image_ctl.generate_image != TRUE ) ||
- ( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
- H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
- ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+ if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
+ (image_ctl.generate_image != TRUE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
+ (image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unexpected image_ctl values(2).\n";
}
}
- } else {
+ }
+ else {
- if ( ( image_ctl.version !=
- H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( image_ctl.generate_image != FALSE ) ||
- ( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
- H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
- ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+ if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
+ (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
+ (image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unexpected image_ctl values(3).\n";
}
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( ( pass ) && ( set_mdci_fapl ) ) {
+ if ((pass) && (set_mdci_fapl)) {
image_ctl.flags = cache_image_flags;
- if ( H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0 ) {
+ if (H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "error returned by H5C_set_cache_image_config().";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->close_warning_received == TRUE ) {
+ if (cache_ptr->close_warning_received == TRUE) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unexpected value of close_warning_received.\n";
}
- if ( mdci_sbem_expected ) {
+ if (mdci_sbem_expected) {
- if ( read_only ) {
+ if (read_only) {
- if ( ( cache_ptr->load_image != TRUE ) ||
- ( cache_ptr->delete_image != FALSE ) ) {
+ if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != FALSE)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "mdci sb extension message not present?\n";
}
- } else {
+ }
+ else {
- if ( ( cache_ptr->load_image != TRUE ) ||
- ( cache_ptr->delete_image != TRUE ) ) {
+ if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != TRUE)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "mdci sb extension message not present?\n";
}
+ }
}
- } else {
+ else {
- if ( ( cache_ptr->load_image == TRUE ) ||
- ( cache_ptr->delete_image == TRUE ) ) {
+ if ((cache_ptr->load_image == TRUE) || (cache_ptr->delete_image == TRUE)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "mdci sb extension message present?\n";
- }
+ }
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( pass ) {
+ if (pass) {
- *file_id_ptr = file_id;
- *file_ptr_ptr = file_ptr;
+ *file_id_ptr = file_id;
+ *file_ptr_ptr = file_ptr;
*cache_ptr_ptr = cache_ptr;
}
- if ( show_progress ) {
- HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
- fcn_name, cp++, pass);
+ if (show_progress) {
+ HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n", fcn_name, cp++, pass);
- if ( ! pass )
- HDfprintf(stdout, "%s: failure_mssg = %s\n",
- fcn_name, failure_mssg);
+ if (!pass)
+ HDfprintf(stdout, "%s: failure_mssg = %s\n", fcn_name, failure_mssg);
}
return;
} /* open_hdf5_file() */
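
The property-list setup performed by open_hdf5_file() (latest-format bounds, a metadata cache image request, a paged file-space strategy, and MPI-IO access with collective metadata operations) condenses to the sketch below. It assumes a parallel HDF5 build; the page size, flag values, and helper name are illustrative, not taken from the test.

    #include "hdf5.h"
    #include "mpi.h"

    /* Sketch only: build a FAPL/FCPL pair roughly as open_hdf5_file()
     * does and create a file with them.  Property values are examples.
     */
    static hid_t
    example_create_parallel_file(const char *name, MPI_Comm comm, MPI_Info info)
    {
        H5AC_cache_image_config_t ci_cfg = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, 1 /* generate_image */,
                                            0 /* save_resize_status */,
                                            H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        hid_t fcpl_id = H5Pcreate(H5P_FILE_CREATE);
        hid_t file_id;

        H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
        H5Pset_mdc_image_config(fapl_id, &ci_cfg); /* request a cache image on close */
        H5Pset_fapl_mpio(fapl_id, comm, info);     /* MPI-IO file driver */
        H5Pset_all_coll_metadata_ops(fapl_id, 1);  /* collective metadata reads */
        H5Pset_coll_metadata_write(fapl_id, 1);    /* collective metadata writes */

        H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1);
        H5Pset_file_space_page_size(fcpl_id, (hsize_t)4096); /* page size is illustrative */

        file_id = H5Fcreate(name, H5F_ACC_TRUNC, fcpl_id, fapl_id);
        H5Pclose(fcpl_id);
        H5Pclose(fapl_id);
        return file_id;
    }
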
-
/*-------------------------------------------------------------------------
* Function: par_create_dataset()
*
@@ -1418,252 +1312,236 @@ open_hdf5_file(const hbool_t create_file,
*/
static void
-par_create_dataset(int dset_num,
- hid_t file_id,
- int mpi_rank,
- int mpi_size)
+par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
{
- const char * fcn_name = "par_create_dataset()";
- char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
- int cp = 0;
- int i, j, k, l;
- int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
- hsize_t dims[3];
- hsize_t a_size[3];
- hsize_t offset[3];
- hsize_t chunk_size[3];
- hid_t status;
- hid_t dataspace_id = -1;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
- hid_t dcpl_id = -1;
- hid_t dxpl_id = -1;
+ const char *fcn_name = "par_create_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hsize_t chunk_size[3];
+ hid_t status;
+ hid_t dataspace_id = -1;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dcpl_id = -1;
+ hid_t dxpl_id = -1;
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
HDsprintf(dset_name, "/dset%03d", dset_num);
- if ( show_progress ) {
+ if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
}
- if ( pass ) {
+ if (pass) {
/* create a dataspace for the chunked dataset */
- dims[0] = (hsize_t)mpi_size;
- dims[1] = DSET_SIZE;
- dims[2] = DSET_SIZE;
+ dims[0] = (hsize_t)mpi_size;
+ dims[1] = DSET_SIZE;
+ dims[2] = DSET_SIZE;
dataspace_id = H5Screate_simple(3, dims, NULL);
- if ( dataspace_id < 0 ) {
+ if (dataspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set the dataset creation plist to specify that the raw data is
     * to be partitioned into 1X10X10 element chunks.
*/
- if ( pass ) {
+ if (pass) {
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
- if ( dcpl_id < 0 ) {
+ if (dcpl_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_DATASET_CREATE) failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( pass ) {
+ if (pass) {
chunk_size[0] = 1;
chunk_size[1] = CHUNK_SIZE;
chunk_size[2] = CHUNK_SIZE;
- if ( H5Pset_chunk(dcpl_id, 3, chunk_size) < 0 ) {
+ if (H5Pset_chunk(dcpl_id, 3, chunk_size) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_chunk() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the dataset */
- if ( pass ) {
+ if (pass) {
- dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
- dataspace_id, H5P_DEFAULT,
- dcpl_id, H5P_DEFAULT);
+ dset_id =
+ H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
- if ( dset_id < 0 ) {
+ if (dset_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dcreate() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get the file space ID */
- if ( pass ) {
+ if (pass) {
filespace_id = H5Dget_space(dset_id);
- if ( filespace_id < 0 ) {
+ if (filespace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dget_space() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read and write chunks */
- if ( pass ) {
+ if (pass) {
- dims[0] = 1;
- dims[1] = CHUNK_SIZE;
- dims[2] = CHUNK_SIZE;
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
memspace_id = H5Screate_simple(3, dims, NULL);
- if ( memspace_id < 0 ) {
+ if (memspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
- if ( pass ) {
+ if (pass) {
- offset[0] = 0; /* offset of hyperslab in memory */
+ offset[0] = 0; /* offset of hyperslab in memory */
offset[1] = 0;
offset[2] = 0;
- a_size[0] = 1; /* size of hyperslab */
+ a_size[0] = 1; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
- a_size, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
- if ( pass ) {
+ if (pass) {
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- if ( dxpl_id < 0 ) {
+ if (dxpl_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( pass ) {
+ if (pass) {
- if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+ if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* initialize the dataset with collective writes */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
- fcn_name, cp, pass);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n", fcn_name, cp, pass);
/* initialize the slab */
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- data_chunk[0][k][l] = (DSET_SIZE * DSET_SIZE * mpi_rank) +
- (DSET_SIZE * (i + k)) + j + l +
- dset_num;
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ data_chunk[0][k][l] =
+ (DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num;
}
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
- fcn_name, cp, pass);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n", fcn_name, cp, pass);
/* select on disk hyperslab */
offset[0] = (hsize_t)mpi_rank; /* offset of hyperslab in file */
offset[1] = (hsize_t)i;
offset[2] = (hsize_t)j;
- a_size[0] = (hsize_t)1; /* size of hyperslab */
+ a_size[0] = (hsize_t)1; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
- fcn_name, cp, pass);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n", fcn_name, cp, pass);
/* write the chunk to file */
- status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id,
- filespace_id, dxpl_id, data_chunk);
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dwrite() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
- fcn_name, cp, pass);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n", fcn_name, cp, pass);
j += CHUNK_SIZE;
}
@@ -1672,16 +1550,14 @@ par_create_dataset(int dset_num,
}
cp++;
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
/* select on disk hyperslab */
offset[0] = (hsize_t)mpi_rank;
offset[1] = (hsize_t)i; /* offset of hyperslab in file */
@@ -1690,69 +1566,58 @@ par_create_dataset(int dset_num,
a_size[1] = CHUNK_SIZE; /* size of hyperslab */
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
/* read the chunk from file */
- if ( pass ) {
+ if (pass) {
- status = H5Dread(dset_id, H5T_NATIVE_INT,
- memspace_id, filespace_id,
- dxpl_id, data_chunk);
+ status = H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "chunk read failed.";
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
}
}
/* validate the slab */
- if ( pass ) {
+ if (pass) {
valid_chunk = TRUE;
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- if ( data_chunk[0][k][l]
- !=
- ((DSET_SIZE * DSET_SIZE * mpi_rank) +
- (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ if (data_chunk[0][k][l] !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) {
valid_chunk = FALSE;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
- k, l, data_chunk[0][k][l],
- ((DSET_SIZE * DSET_SIZE * mpi_rank) +
- (DSET_SIZE * (i + k)) + j + l + dset_num));
- HDfprintf(stdout,
- "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
- dset_num, i, j, k, l);
+ HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l,
+ data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j +
+ l + dset_num));
+ HDfprintf(stdout, "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n", dset_num,
+ i, j, k, l);
}
}
}
}
- if ( ! valid_chunk ) {
+ if (!valid_chunk) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
- i, j, dset_num);
+ HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num);
}
}
}
@@ -1761,59 +1626,58 @@ par_create_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the data space */
- if ( ( pass ) && ( H5Sclose(dataspace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(dataspace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(dataspace_id) failed.";
}
/* close the file space */
- if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
- if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+ if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
- if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
/* close the dataset creation property list */
- if ( ( pass ) && ( H5Pclose(dcpl_id) < 0 ) ) {
+ if ((pass) && (H5Pclose(dcpl_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pclose(dcpl) failed.";
}
    /* close the data transfer property list */
- if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+ if ((pass) && (H5Pclose(dxpl_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_create_dataset() */
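
At its core, par_create_dataset() performs a collective hyperslab write in which each rank owns one slab along the first dimension. A condensed sketch of that pattern follows; it assumes file_id was opened with an MPI-IO file access property list, and the dataset name and slab size are illustrative.

    #include "hdf5.h"
    #include "mpi.h"

    #define EX_CHUNK 10

    /* Sketch only: every rank writes its own 1 x EX_CHUNK x EX_CHUNK slab
     * of a (mpi_size x EX_CHUNK x EX_CHUNK) dataset with a collective
     * transfer.  All ranks must make the same H5Dcreate2()/H5Dwrite()
     * calls for the collective operations to complete.
     */
    static void
    example_collective_write(hid_t file_id, int mpi_rank, int mpi_size)
    {
        hsize_t dims[3]  = {(hsize_t)mpi_size, EX_CHUNK, EX_CHUNK};
        hsize_t start[3] = {(hsize_t)mpi_rank, 0, 0};
        hsize_t count[3] = {1, EX_CHUNK, EX_CHUNK};
        int     buf[1][EX_CHUNK][EX_CHUNK];
        int     k, l;

        for (k = 0; k < EX_CHUNK; k++)
            for (l = 0; l < EX_CHUNK; l++)
                buf[0][k][l] = (mpi_rank * EX_CHUNK * EX_CHUNK) + (k * EX_CHUNK) + l;

        hid_t fspace    = H5Screate_simple(3, dims, NULL);
        hid_t dset_id   = H5Dcreate2(file_id, "/ex_dset", H5T_NATIVE_INT, fspace, H5P_DEFAULT,
                                     H5P_DEFAULT, H5P_DEFAULT);
        hid_t mspace    = H5Screate_simple(3, count, NULL);
        hid_t filespace = H5Dget_space(dset_id);
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);

        hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); /* collective raw-data transfer */
        H5Dwrite(dset_id, H5T_NATIVE_INT, mspace, filespace, dxpl_id, buf);

        H5Pclose(dxpl_id);
        H5Sclose(filespace);
        H5Sclose(mspace);
        H5Sclose(fspace);
        H5Dclose(dset_id);
    }
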
-
/*-------------------------------------------------------------------------
* Function: par_delete_dataset()
*
@@ -1837,51 +1701,48 @@ par_create_dataset(int dset_num,
*/
static void
-par_delete_dataset(int dset_num,
- hid_t file_id,
- int mpi_rank)
+par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
{
- const char * fcn_name = "par_delete_dataset()";
- char dset_name[256];
- hbool_t show_progress = FALSE;
- int cp = 0;
+ const char *fcn_name = "par_delete_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
show_progress = (show_progress && (mpi_rank == 0));
HDsprintf(dset_name, "/dset%03d", dset_num);
- if ( show_progress ) {
+ if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
}
/* verify the target dataset */
- if ( pass ) {
+ if (pass) {
par_verify_dataset(dset_num, file_id, mpi_rank);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* delete the target dataset */
- if ( pass ) {
+ if (pass) {
- if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+ if (H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Ldelete() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_delete_dataset() */
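
Deleting a dataset, as par_delete_dataset() does above, amounts to unlinking its path with H5Ldelete(); a minimal sketch with an illustrative path:

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch only: unlink a dataset by path.  The storage it occupied is
     * reclaimed (or not) according to the file's free-space settings.
     */
    static void
    example_delete_dataset(hid_t file_id)
    {
        if (H5Ldelete(file_id, "/ex_dset", H5P_DEFAULT) < 0)
            fprintf(stderr, "H5Ldelete() failed\n");
    }
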
-
/* This test uses many POSIX things that are not available on
* Windows. We're using a check for fork(2) here as a proxy for
* all POSIX/Unix/Linux things until this test can be made
@@ -1889,7 +1750,6 @@ par_delete_dataset(int dset_num,
*/
#ifdef H5_HAVE_FORK
-
/*-------------------------------------------------------------------------
* Function: par_insert_cache_image()
*
@@ -1919,17 +1779,17 @@ par_delete_dataset(int dset_num,
*/
static void
-par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
+par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
{
hbool_t show_progress = FALSE;
- if ( pass ) {
+ if (pass) {
- if ( mpi_rank == 0 ) { /* insert cache image in supplied test file */
+ if (mpi_rank == 0) { /* insert cache image in supplied test file */
- char file_name_idx_str[32];
- char mpi_size_str[32];
- int child_status;
+ char file_name_idx_str[32];
+ char mpi_size_str[32];
+ int child_status;
pid_t child_pid;
HDsprintf(file_name_idx_str, "%d", file_name_idx);
@@ -1937,57 +1797,52 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
child_pid = fork();
- if ( child_pid == 0 ) { /* this is the child process */
+ if (child_pid == 0) { /* this is the child process */
                /* fun and games to shut up the compiler */
- char param0[32] = "t_cache_image";
- char param1[32] = "ici";
- char * child_argv[] = {param0,
- param1,
- file_name_idx_str,
- mpi_size_str,
- NULL};
+ char param0[32] = "t_cache_image";
+ char param1[32] = "ici";
+ char *child_argv[] = {param0, param1, file_name_idx_str, mpi_size_str, NULL};
/* we may need to play with the path here */
- if ( execv("t_cache_image", child_argv) == -1 ) {
+ if (execv("t_cache_image", child_argv) == -1) {
- HDfprintf(stdout,
- "execl() of ici process failed. errno = %d(%s)\n",
- errno, strerror(errno));
+                    HDfprintf(stdout, "execv() of ici process failed. errno = %d(%s)\n", errno,
+ strerror(errno));
HDexit(1);
}
-
- } else if ( child_pid != -1 ) {
+ }
+ else if (child_pid != -1) {
/* this is the parent process -- wait until child is done */
- if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
+ if (-1 == waitpid(child_pid, &child_status, WUNTRACED)) {
HDfprintf(stdout, "can't wait on ici process.\n");
pass = FALSE;
-
- } else if ( ! WIFEXITED(child_status) ) {
+ }
+ else if (!WIFEXITED(child_status)) {
HDfprintf(stdout, "ici process hasn't exitied.\n");
pass = FALSE;
-
- } else if ( WEXITSTATUS(child_status) != 0 ) {
+ }
+ else if (WEXITSTATUS(child_status) != 0) {
HDfprintf(stdout, "ici process reports failure.\n");
pass = FALSE;
-
- } else if ( show_progress ) {
+ }
+ else if (show_progress) {
HDfprintf(stdout, "cache image insertion complete.\n");
}
- } else { /* fork failed */
+ }
+ else { /* fork failed */
- HDfprintf(stdout,
- "can't create process to insert cache image.\n");
+ HDfprintf(stdout, "can't create process to insert cache image.\n");
pass = FALSE;
}
}
}
- if ( pass ) {
+ if (pass) {
/* make sure insertion of the cache image is complete
* before proceeding
@@ -2001,14 +1856,13 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
#else /* H5_HAVE_FORK */
static void
-par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
+par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
{
return;
} /* par_insert_cache_image() */
#endif /* H5_HAVE_FORK */
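
The H5_HAVE_FORK branch above relies on the usual POSIX fork/exec/waitpid idiom to run the cache image insertion in a separate serial process. A generic sketch of that idiom, with a placeholder helper name and arguments, looks like this:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Generic POSIX sketch (placeholder helper name/arguments): run a
     * child process and report whether it exited normally with status 0.
     */
    static int
    run_helper(void)
    {
        pid_t child_pid = fork();

        if (child_pid == 0) {
            /* child: replace this process image with the helper program */
            char *child_argv[] = {"helper", "arg1", NULL};

            execv("./helper", child_argv);
            /* only reached if execv() failed */
            fprintf(stderr, "execv() failed: %s\n", strerror(errno));
            _exit(1);
        }
        else if (child_pid > 0) {
            /* parent: wait for the child and inspect its exit status */
            int child_status;

            if (waitpid(child_pid, &child_status, 0) == -1)
                return -1;
            if (!WIFEXITED(child_status) || WEXITSTATUS(child_status) != 0)
                return -1;
            return 0;
        }
        return -1; /* fork() failed */
    }
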
-
/*-------------------------------------------------------------------------
* Function: par_verify_dataset()
*
@@ -2032,139 +1886,134 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
*/
static void
-par_verify_dataset(int dset_num,
- hid_t file_id,
- int mpi_rank)
+par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
{
- const char * fcn_name = "par_verify_dataset()";
- char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
- int cp = 0;
- int i, j, k, l;
- int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
- hsize_t dims[3];
- hsize_t a_size[3];
- hsize_t offset[3];
- hid_t status;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
- hid_t dxpl_id = -1;
+ const char *fcn_name = "par_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hid_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dxpl_id = -1;
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
HDsprintf(dset_name, "/dset%03d", dset_num);
- if ( show_progress ) {
+ if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
}
- if ( pass ) {
+ if (pass) {
/* open the dataset */
dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
- if ( dset_id < 0 ) {
+ if (dset_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dopen2() failed.";
}
}
/* get the file space ID */
- if ( pass ) {
+ if (pass) {
filespace_id = H5Dget_space(dset_id);
- if ( filespace_id < 0 ) {
+ if (filespace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dget_space() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
- if ( pass ) {
+ if (pass) {
- dims[0] = 1;
- dims[1] = CHUNK_SIZE;
- dims[2] = CHUNK_SIZE;
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
memspace_id = H5Screate_simple(3, dims, NULL);
- if ( memspace_id < 0 ) {
+ if (memspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
- if ( pass ) {
+ if (pass) {
- offset[0] = 0; /* offset of hyperslab in memory */
+ offset[0] = 0; /* offset of hyperslab in memory */
offset[1] = 0;
offset[2] = 0;
- a_size[0] = 1; /* size of hyperslab */
+ a_size[0] = 1; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
- a_size, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
- if ( pass ) {
+ if (pass) {
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- if ( dxpl_id < 0 ) {
+ if (dxpl_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- if ( pass ) {
+ if (pass) {
- if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+ if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
/* select on disk hyperslab */
offset[0] = (hsize_t)mpi_rank;
offset[1] = (hsize_t)i; /* offset of hyperslab in file */
@@ -2173,69 +2022,58 @@ par_verify_dataset(int dset_num,
a_size[1] = CHUNK_SIZE; /* size of hyperslab */
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
/* read the chunk from file */
- if ( pass ) {
+ if (pass) {
- status = H5Dread(dset_id, H5T_NATIVE_INT,
- memspace_id, filespace_id,
- dxpl_id, data_chunk);
+ status = H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "chunk read failed.";
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
}
}
/* validate the slab */
- if ( pass ) {
+ if (pass) {
valid_chunk = TRUE;
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- if ( data_chunk[0][k][l]
- !=
- ((DSET_SIZE * DSET_SIZE * mpi_rank) +
- (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ if (data_chunk[0][k][l] !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) {
valid_chunk = FALSE;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
- k, l, data_chunk[0][k][l],
- ((DSET_SIZE * DSET_SIZE * mpi_rank) +
- (DSET_SIZE * (i + k)) + j + l + dset_num));
- HDfprintf(stdout,
- "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
- dset_num, i, j, k, l);
+ HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l,
+ data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j +
+ l + dset_num));
+ HDfprintf(stdout, "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n", dset_num,
+ i, j, k, l);
}
}
}
}
- if ( ! valid_chunk ) {
+ if (!valid_chunk) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
- i, j, dset_num);
+ HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num);
}
}
}
@@ -2244,45 +2082,44 @@ par_verify_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
- if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
- if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+ if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
- if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
    /* close the data transfer property list */
- if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+ if ((pass) && (H5Pclose(dxpl_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_verify_dataset() */
-
/*-------------------------------------------------------------------------
* Function: serial_insert_cache_image()
*
@@ -2307,48 +2144,44 @@ par_verify_dataset(int dset_num,
*/
static hbool_t
-serial_insert_cache_image(int file_name_idx, int mpi_size )
+serial_insert_cache_image(int file_name_idx, int mpi_size)
{
- const char * fcn_name = "serial_insert_cache_image()";
- char filename[512];
- hbool_t show_progress = FALSE;
- int cp = 0;
- int i;
- int num_dsets = PAR_NUM_DSETS;
- hid_t file_id = -1;
- H5F_t *file_ptr = NULL;
- H5C_t *cache_ptr = NULL;
- MPI_Comm dummy_comm = MPI_COMM_WORLD;
- MPI_Info dummy_info = MPI_INFO_NULL;
+ const char *fcn_name = "serial_insert_cache_image()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ hid_t file_id = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ MPI_Comm dummy_comm = MPI_COMM_WORLD;
+ MPI_Info dummy_info = MPI_INFO_NULL;
pass = TRUE;
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 1) setup the file name */
- if ( pass ) {
+ if (pass) {
HDassert(FILENAMES[file_name_idx]);
- if ( h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT,
- filename, sizeof(filename))
- == NULL ) {
+ if (h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
pass = FALSE;
HDfprintf(stdout, "h5_fixname() failed.\n");
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 2) Open the PHDF5 file with the cache image FAPL entry.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ FALSE,
@@ -2369,42 +2202,39 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
/* md_write_strat */ 1);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 3) Validate contents of the file */
i = 0;
- while ( ( pass ) && ( i < num_dsets ) ) {
+ while ((pass) && (i < num_dsets)) {
serial_verify_dataset(i, file_id, mpi_size);
i++;
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 4) Close the file */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return pass;
} /* serial_insert_cache_image() */
-
/*-------------------------------------------------------------------------
* Function: serial_verify_dataset()
*
@@ -2428,112 +2258,105 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
*/
static void
-serial_verify_dataset(int dset_num,
- hid_t file_id,
- int mpi_size)
+serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
{
- const char * fcn_name = "serial_verify_dataset()";
- char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
- int cp = 0;
- int i, j, k, l, m;
- int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
- hsize_t dims[3];
- hsize_t a_size[3];
- hsize_t offset[3];
- hid_t status;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
+ const char *fcn_name = "serial_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hid_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
HDsprintf(dset_name, "/dset%03d", dset_num);
- if ( show_progress ) {
+ if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
}
- if ( pass ) {
+ if (pass) {
/* open the dataset */
dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
- if ( dset_id < 0 ) {
+ if (dset_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dopen2() failed.";
}
}
/* get the file space ID */
- if ( pass ) {
+ if (pass) {
filespace_id = H5Dget_space(dset_id);
- if ( filespace_id < 0 ) {
+ if (filespace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dget_space() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
- if ( pass ) {
+ if (pass) {
- dims[0] = 1;
- dims[1] = CHUNK_SIZE;
- dims[2] = CHUNK_SIZE;
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
memspace_id = H5Screate_simple(3, dims, NULL);
- if ( memspace_id < 0 ) {
+ if (memspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
- if ( pass ) {
+ if (pass) {
- offset[0] = 0; /* offset of hyperslab in memory */
+ offset[0] = 0; /* offset of hyperslab in memory */
offset[1] = 0;
offset[2] = 0;
- a_size[0] = 1; /* size of hyperslab */
+ a_size[0] = 1; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
- a_size, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* read data from data sets and validate it */
i = 0;
- while ( ( pass ) && ( i < mpi_size ) )
- {
+ while ((pass) && (i < mpi_size)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
k = 0;
- while ( ( pass ) && ( k < DSET_SIZE ) )
- {
+ while ((pass) && (k < DSET_SIZE)) {
/* select on disk hyperslab */
offset[0] = (hsize_t)i; /* offset of hyperslab in file */
offset[1] = (hsize_t)j; /* offset of hyperslab in file */
@@ -2542,70 +2365,61 @@ serial_verify_dataset(int dset_num,
a_size[1] = CHUNK_SIZE; /* size of hyperslab */
a_size[2] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
/* read the chunk from file */
- if ( pass ) {
+ if (pass) {
- status = H5Dread(dset_id, H5T_NATIVE_INT,
- memspace_id, filespace_id,
- H5P_DEFAULT, data_chunk);
+ status =
+ H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, H5P_DEFAULT, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "chunk read failed.";
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
}
}
/* validate the slab */
- if ( pass ) {
+ if (pass) {
valid_chunk = TRUE;
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- for ( m = 0; m < CHUNK_SIZE; m++ )
- {
- if ( data_chunk[0][l][m]
- !=
- ((DSET_SIZE * DSET_SIZE * i) +
- (DSET_SIZE * (j + l)) + k + m + dset_num) ) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ for (m = 0; m < CHUNK_SIZE; m++) {
+ if (data_chunk[0][l][m] !=
+ ((DSET_SIZE * DSET_SIZE * i) + (DSET_SIZE * (j + l)) + k + m + dset_num)) {
valid_chunk = FALSE;
- if ( verbose ) {
+ if (verbose) {
+                            HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", l, m,
+                                      data_chunk[0][l][m],
+ ((DSET_SIZE * DSET_SIZE * i) + (DSET_SIZE * (j + l)) + k + m +
+ dset_num));
HDfprintf(stdout,
- "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
- j, k, data_chunk[0][j][k],
- ((DSET_SIZE * DSET_SIZE * i) +
- (DSET_SIZE * (j + l)) + k + m + dset_num));
- HDfprintf(stdout,
- "dset_num = %d, i = %d, j = %d, k = %d, l = %d, m = %d\n",
- dset_num, i, j, k, l, m);
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d, m = %d\n",
+ dset_num, i, j, k, l, m);
}
}
}
}
- if ( ! valid_chunk ) {
+ if (!valid_chunk) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
- j, k, dset_num);
+ HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", j, k, dset_num);
}
}
}
@@ -2616,39 +2430,37 @@ serial_verify_dataset(int dset_num,
i++;
}
-
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
- if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
- if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+ if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
- if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+ if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* serial_verify_dataset() */
-
/*-------------------------------------------------------------------------
* Function: parse_flags
*
@@ -2664,12 +2476,12 @@ serial_verify_dataset(int dset_num,
*-------------------------------------------------------------------------
*/
static hbool_t
-parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
- hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display)
+parse_flags(int argc, char *argv[], hbool_t *setup_ptr, hbool_t *ici_ptr, int *file_idx_ptr,
+ int *mpi_size_ptr, hbool_t display)
{
- const char * fcn_name = "parse_flags()";
- const char * (ops[]) = {"setup", "ici"};
- int success = TRUE;
+ const char *fcn_name = "parse_flags()";
+ const char *(ops[]) = {"setup", "ici"};
+ int success = TRUE;
HDassert(setup_ptr);
HDassert(*setup_ptr == FALSE);
@@ -2678,73 +2490,67 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
HDassert(file_idx_ptr);
HDassert(mpi_size_ptr);
- if ( setup_ptr == NULL ) {
+ if (setup_ptr == NULL) {
success = FALSE;
HDfprintf(stdout, "%s: bad arg(s) on entry.\n", fcn_name);
}
-
- if ( ( success ) &&
- ( ( argc != 1 ) && ( argc != 2 ) && ( argc != 4 ) ) ) {
+ if ((success) && ((argc != 1) && (argc != 2) && (argc != 4))) {
success = FALSE;
usage();
}
+ if ((success) && (argc >= 2)) {
- if ( ( success ) && ( argc >= 2 ) ) {
+ if (strcmp(argv[1], ops[0]) == 0) {
- if ( strcmp(argv[1], ops[0]) == 0 ) {
-
- if ( argc != 2 ) {
+ if (argc != 2) {
success = FALSE;
usage();
-
- } else {
+ }
+ else {
*setup_ptr = TRUE;
-
}
- } else if ( strcmp(argv[1], ops[1]) == 0 ) {
+ }
+ else if (strcmp(argv[1], ops[1]) == 0) {
- if ( argc != 4 ) {
+ if (argc != 4) {
success = FALSE;
usage();
+ }
+ else {
- } else {
-
- *ici_ptr = TRUE;
+ *ici_ptr = TRUE;
*file_idx_ptr = atoi(argv[2]);
*mpi_size_ptr = atoi(argv[3]);
-
}
}
}
- if ( ( success ) && ( display ) ) {
+ if ((success) && (display)) {
- if ( *setup_ptr )
+ if (*setup_ptr)
HDfprintf(stdout, "t_cache_image setup\n");
- else if ( *ici_ptr )
+ else if (*ici_ptr)
- HDfprintf(stdout, "t_cache_image ici %d %d\n",
- *file_idx_ptr, *mpi_size_ptr);
+ HDfprintf(stdout, "t_cache_image ici %d %d\n", *file_idx_ptr, *mpi_size_ptr);
else
HDfprintf(stdout, "t_cache_image\n");
}
- return(success);
+ return (success);
} /* parse_flags() */
-
/*-------------------------------------------------------------------------
* Function: usage
*
@@ -2763,8 +2569,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
void
usage(void)
{
- const char * s[] =
- {
+ const char *s[] = {
"\n",
"t_cache_image:\n",
"\n",
@@ -2800,7 +2605,7 @@ usage(void)
};
int i = 0;
- while(s[i] != NULL) {
+ while (s[i] != NULL) {
HDfprintf(stdout, "%s", s[i]);
i++;
}
@@ -2808,7 +2613,6 @@ usage(void)
return;
} /* usage() */
-
/*-------------------------------------------------------------------------
* Function: verify_data_sets()
*
@@ -2843,23 +2647,24 @@ usage(void)
static void
verify_data_sets(hid_t file_id, int min_dset, int max_dset)
{
- const char * fcn_name = "verify_data_sets()";
- char dset_name[64];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
- int cp = 0;
- int i, j, k, l, m;
- int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
- herr_t status;
- hid_t filespace_ids[MAX_NUM_DSETS];
- hid_t memspace_id = -1;
- hid_t dataset_ids[MAX_NUM_DSETS];
- hsize_t dims[2];
- hsize_t a_size[2];
- hsize_t offset[2];
-
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ const char *fcn_name = "verify_data_sets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
HDassert(0 <= min_dset);
HDassert(min_dset <= max_dset);
@@ -2867,33 +2672,32 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the datasets */
- if ( pass ) {
+ if (pass) {
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
+ while ((pass) && (i <= max_dset)) {
/* open the dataset */
- if ( pass ) {
+ if (pass) {
HDsprintf(dset_name, "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
- if ( dataset_ids[i] < 0 ) {
+ if (dataset_ids[i] < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dopen2() failed.";
}
}
/* get the file space ID */
- if ( pass ) {
+ if (pass) {
filespace_ids[i] = H5Dget_space(dataset_ids[i]);
- if ( filespace_ids[i] < 0 ) {
+ if (filespace_ids[i] < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -2902,124 +2706,111 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* create the mem space to be used to read and write chunks */
- if ( pass ) {
+ if (pass) {
- dims[0] = CHUNK_SIZE;
- dims[1] = CHUNK_SIZE;
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
memspace_id = H5Screate_simple(2, dims, NULL);
- if ( memspace_id < 0 ) {
+ if (memspace_id < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Screate_simple() failed.";
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* select in memory hyperslab */
- if ( pass ) {
+ if (pass) {
- offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[0] = 0; /*offset of hyperslab in memory*/
offset[1] = 0;
- a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
a_size[1] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
- a_size, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
-
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* read data from data sets and validate it */
i = 0;
- while ( ( pass ) && ( i < DSET_SIZE ) )
- {
+ while ((pass) && (i < DSET_SIZE)) {
j = 0;
- while ( ( pass ) && ( j < DSET_SIZE ) )
- {
+ while ((pass) && (j < DSET_SIZE)) {
m = min_dset;
- while ( ( pass ) && ( m <= max_dset ) )
- {
+ while ((pass) && (m <= max_dset)) {
/* select on disk hyperslab */
offset[0] = (hsize_t)i; /* offset of hyperslab in file */
offset[1] = (hsize_t)j;
a_size[0] = CHUNK_SIZE; /* size of hyperslab */
a_size[1] = CHUNK_SIZE;
- status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
- offset, NULL, a_size, NULL);
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
/* read the chunk from file */
- if ( pass ) {
+ if (pass) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
- if ( status < 0 ) {
+ if (status < 0) {
- pass = FALSE;
- failure_mssg = "disk hyperslab create failed.";
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
}
}
/* validate the slab */
- if ( pass ) {
+ if (pass) {
valid_chunk = TRUE;
- for ( k = 0; k < CHUNK_SIZE; k++ )
- {
- for ( l = 0; l < CHUNK_SIZE; l++ )
- {
- if ( data_chunk[k][l]
- !=
- ((DSET_SIZE * DSET_SIZE * m) +
- (DSET_SIZE * (i + k)) + j + l) ) {
+ for (k = 0; k < CHUNK_SIZE; k++) {
+ for (l = 0; l < CHUNK_SIZE; l++) {
+ if (data_chunk[k][l] !=
+ ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) {
valid_chunk = FALSE;
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
- k, l, data_chunk[k][l],
- ((DSET_SIZE * DSET_SIZE * m) +
- (DSET_SIZE * (i + k)) + j + l));
- HDfprintf(stdout,
- "m = %d, i = %d, j = %d, k = %d, l = %d\n",
- m, i, j, k, l);
- }
+ HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l,
+ data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout, "m = %d, i = %d, j = %d, k = %d, l = %d\n", m, i, j, k,
+ l);
+ }
}
}
}
- if ( ! valid_chunk ) {
+ if (!valid_chunk) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if (verbose) {
- HDfprintf(stdout,
- "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
- i, j, m);
- }
+ HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m);
+ }
}
}
m++;
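
/*
 * Illustrative sketch (not from the HDF5 sources): how the expected cell
 * value in the verify_data_sets() validation loop above is derived.  i and j
 * are the chunk's offset within the file dataspace, k and l index within the
 * chunk, and m is the dataset number.  DSET_SIZE below is a placeholder; the
 * real test defines its own value.
 */
#include <stdio.h>

#define DSET_SIZE 64 /* placeholder edge length, for illustration only */

static int
expected_cell(int m, int i, int j, int k, int l)
{
    /* each dataset holds a linear ramp offset by DSET_SIZE * DSET_SIZE * m */
    return (DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l;
}

int
main(void)
{
    /* dataset 2, chunk at file offset (0, 0), local cell (1, 3) */
    printf("%d\n", expected_cell(2, 0, 0, 1, 3)); /* 2*64*64 + 1*64 + 3 = 8259 */
    return 0;
}
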
@@ -3029,39 +2820,37 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
i += CHUNK_SIZE;
}
- if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
/* close the file spaces */
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
- if ( H5Sclose(filespace_ids[i]) < 0 ) {
+ while ((pass) && (i <= max_dset)) {
+ if (H5Sclose(filespace_ids[i]) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose() failed.";
}
i++;
}
-
/* close the datasets */
i = min_dset;
- while ( ( pass ) && ( i <= max_dset ) )
- {
- if ( H5Dclose(dataset_ids[i]) < 0 ) {
+ while ((pass) && (i <= max_dset)) {
+ if (H5Dclose(dataset_ids[i]) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Dclose() failed.";
}
i++;
}
/* close the mem space */
- if ( pass ) {
+ if (pass) {
- if ( H5Sclose(memspace_id) < 0 ) {
+ if (H5Sclose(memspace_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
}
@@ -3070,7 +2859,6 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* verify_data_sets() */
-
/****************************************************************************/
/******************************* Test Functions *****************************/
/****************************************************************************/
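
/*
 * Illustrative sketch (not from the HDF5 sources): the guarded-step idiom
 * that the test functions below are built from.  In t_cache_image.c, pass and
 * failure_mssg are file-scope variables; they are declared here only so the
 * sketch stands alone.
 */
#include "hdf5.h"

static hbool_t     pass         = 1;
static const char *failure_mssg = NULL;

static void
close_file_step(hid_t file_id)
{
    /* each step runs only while all earlier steps have succeeded; the first
     * failing step records a message for the final report
     */
    if (pass) {
        if (H5Fclose(file_id) < 0) {
            pass         = 0;
            failure_mssg = "H5Fclose() failed.";
        }
    }
}
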
@@ -3120,19 +2908,19 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
static unsigned
verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
{
- const char * fcn_name = "verify_cache_image_RO()";
- char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
- H5F_t *file_ptr = NULL;
- H5C_t *cache_ptr = NULL;
- int cp = 0;
+ const char *fcn_name = "verify_cache_image_RO()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ int cp = 0;
pass = TRUE;
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
- switch(md_write_strat) {
+ switch (md_write_strat) {
case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
TESTING("parallel CI load test -- proc0 md write -- R/O");
@@ -3149,39 +2937,36 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
+ show_progress = ((show_progress) && (mpi_rank == 0));
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* setup the file name */
- if ( pass ) {
+ if (pass) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
- filename, sizeof(filename)) == NULL ) {
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_fixname() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 1) Open the test file created at the beginning of this test.
*
* Verify that the file contains a cache image.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3196,10 +2981,9 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 2) Verify that the file contains the expected data.
*
* Verify that only process 0 reads the cache image.
@@ -3208,74 +2992,71 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
* image block from process 0.
*/
- if ( pass ) {
+ if (pass) {
- verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
}
/* Verify that only process 0 reads the cache image. */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
- ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+ if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+ ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "unexpected images_read.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
- * from process 0.
- *
- * Since we have already verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
- * all processes.
- */
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 1 ) {
+ if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Image not loaded?.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 3) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 4) Open the file, and verify that it still contains a cache image. */
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3290,69 +3071,62 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 5) Verify that the file contains the expected data. */
- if ( pass ) {
+ if (pass) {
- verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
}
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 1 ) {
+ if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "metadata cache image block not loaded(2).";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
-
/* 6) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* report results */
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
- if ( pass ) {
+ if (pass) {
PASSED();
-
- } else {
+ }
+ else {
H5_FAILED();
- if ( show_progress ) {
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name,
- failure_mssg);
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
}
-
return !pass;
} /* verify_cache_image_RO() */
-
/*-------------------------------------------------------------------------
* Function: verify_cache_image_RW()
*
@@ -3401,19 +3175,19 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
static unsigned
verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
{
- const char * fcn_name = "verify_cache_imageRW()";
- char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
- H5F_t *file_ptr = NULL;
- H5C_t *cache_ptr = NULL;
- int cp = 0;
+ const char *fcn_name = "verify_cache_imageRW()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ int cp = 0;
pass = TRUE;
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
- switch(md_write_strat) {
+ switch (md_write_strat) {
case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
TESTING("parallel CI load test -- proc0 md write -- R/W");
@@ -3430,27 +3204,24 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
+ show_progress = ((show_progress) && (mpi_rank == 0));
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* setup the file name */
- if ( pass ) {
+ if (pass) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
- filename, sizeof(filename)) == NULL ) {
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_fixname() failed.\n";
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 1) Open the test file created at the beginning of this test.
*
* Verify that the file contains a cache image.
@@ -3461,13 +3232,13 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
* image block from process 0.
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3482,10 +3253,9 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 2) Verify that the file contains the expected data.
*
* Verify that only process 0 reads the cache image.
@@ -3493,74 +3263,71 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
* Verify that all other processes receive the cache
* image block from process 0.
*/
- if ( pass ) {
+ if (pass) {
- verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
}
/* Verify that only process 0 reads the cache image. */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
- ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+ if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+ ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "unexpected images_read.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
- * from process 0.
- *
- * Since we have already verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
- * all processes.
- */
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 1 ) {
+ if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Image not loaded?.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 3) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 4) Open the file, and verify that it doesn't contain a cache image. */
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3575,84 +3342,76 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 5) Verify that the file contains the expected data. */
- if ( pass ) {
+ if (pass) {
- verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
}
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 0 ) {
+ if (cache_ptr->images_loaded != 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "metadata cache image block loaded(1).";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
-
/* 6) Close the file. */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( show_progress )
+ if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 7) Delete the file. */
- if ( pass ) {
+ if (pass) {
/* wait for everyone to close the file */
MPI_Barrier(MPI_COMM_WORLD);
- if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) {
+ if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "HDremove() failed.\n";
}
}
-
/* report results */
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
- if ( pass ) {
+ if (pass) {
PASSED();
-
- } else {
+ }
+ else {
H5_FAILED();
- if ( show_progress ) {
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name,
- failure_mssg);
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
}
-
return !pass;
} /* verify_cache_imageRW() */
-
/*****************************************************************************
*
* Function: smoke_check_1()
@@ -3680,52 +3439,49 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
static hbool_t
smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
{
- const char * fcn_name = "smoke_check_1()";
- char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
- H5F_t *file_ptr = NULL;
- H5C_t *cache_ptr = NULL;
- int cp = 0;
- int i;
- int num_dsets = PAR_NUM_DSETS;
- int test_file_index = 2;
+ const char * fcn_name = "smoke_check_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ int test_file_index = 2;
h5_stat_size_t file_size;
pass = TRUE;
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
TESTING("parallel cache image smoke check 1");
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the file name */
- if ( pass ) {
+ if (pass) {
HDassert(FILENAMES[test_file_index]);
- if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
- filename, sizeof(filename))
- == NULL ) {
+ if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_fixname() failed.\n";
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 1) Create a PHDF5 file without the cache image FAPL entry.
*
* Verify that a cache image is not requested
*/
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ TRUE,
/* mdci_sbem_expected */ FALSE,
@@ -3746,63 +3502,57 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* md_write_strat */ 1);
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 2) Create datasets in the file */
i = 0;
- while ( ( pass ) && ( i < num_dsets ) ) {
+ while ((pass) && (i < num_dsets)) {
par_create_dataset(i, file_id, mpi_rank, mpi_size);
i++;
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 3) Verify the datasets in the file */
i = 0;
- while ( ( pass ) && ( i < num_dsets ) ) {
+ while ((pass) && (i < num_dsets)) {
par_verify_dataset(i, file_id, mpi_rank);
i++;
}
-
/* 4) Close the file */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.\n";
-
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 5 Insert a cache image into the file */
- if ( pass ) {
+ if (pass) {
- par_insert_cache_image(test_file_index, mpi_rank, mpi_size);
+ par_insert_cache_image(test_file_index, mpi_rank, mpi_size);
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 6) Open the file R/O */
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
@@ -3823,10 +3573,9 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* md_write_strat */ 1);
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 7) Verify the datasets in the file backwards
*
* Verify that only process 0 reads the cache image.
@@ -3836,72 +3585,69 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
*/
i = num_dsets - 1;
- while ( ( pass ) && ( i >= 0 ) ) {
+ while ((pass) && (i >= 0)) {
par_verify_dataset(i, file_id, mpi_rank);
i--;
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that only process 0 reads the cache image. */
+ /* Verify that only process 0 reads the cache image. */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
- ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+ if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+ ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "unexpected images_read.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
- * from process 0.
- *
- * Since we have already verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
- * all processes.
- */
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 1 ) {
+ if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Image not loaded?.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 8) Close the file */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.";
-
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 9) Open the file */
- if ( pass ) {
+ if (pass) {
open_hdf5_file(/* create_file */ FALSE,
/* mdci_sbem_expected */ TRUE,
@@ -3922,10 +3668,9 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* md_write_strat */ 1);
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 10) Verify the datasets in the file
*
* Verify that only process 0 reads the cache image.
@@ -3935,82 +3680,78 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
*/
i = 0;
- while ( ( pass ) && ( i < num_dsets ) ) {
+ while ((pass) && (i < num_dsets)) {
par_verify_dataset(i, file_id, mpi_rank);
i++;
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that only process 0 reads the cache image. */
+ /* Verify that only process 0 reads the cache image. */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
- ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+ if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+ ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "unexpected images_read.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
- * from process 0.
- *
- * Since we have already verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
- * all processes.
- */
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
#if H5C_COLLECT_CACHE_STATS
- if ( pass ) {
+ if (pass) {
- if ( cache_ptr->images_loaded != 1 ) {
+ if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Image not loaded?.";
}
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 11) Delete the datasets in the file */
i = 0;
- while ( ( pass ) && ( i < num_dsets ) ) {
+ while ((pass) && (i < num_dsets)) {
par_delete_dataset(i, file_id, mpi_rank);
i++;
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 12) Close the file */
- if ( pass ) {
+ if (pass) {
- if ( H5Fclose(file_id) < 0 ) {
+ if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "H5Fclose() failed.";
-
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 13) Get the size of the file. Verify that it is less
* than 20 KB. Without deletions and persistent free
* space managers, the file size is about 30 MB, so this
@@ -4020,55 +3761,52 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
* Note that this test will have to change if we use
* a larger page size.
*/
- if ( pass ) {
+ if (pass) {
- if ( ( file_size = h5_get_file_size(filename, H5P_DEFAULT) ) < 0 ) {
+ if ((file_size = h5_get_file_size(filename, H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_get_file_size() failed.";
+ }
+ else if (file_size > 20 * 1024) {
- } else if ( file_size > 20 * 1024 ) {
-
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "unexpectedly large file size.";
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* 14) Delete the file */
- if ( pass ) {
+ if (pass) {
/* wait for everyone to close the file */
MPI_Barrier(MPI_COMM_WORLD);
- if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) {
+ if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "HDremove() failed.\n";
}
}
- if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ if ((mpi_rank == 0) && (show_progress))
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
-
/* report results */
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
- if ( pass ) {
+ if (pass) {
PASSED();
-
- } else {
+ }
+ else {
H5_FAILED();
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
- fcn_name, failure_mssg);
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
@@ -4076,7 +3814,6 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
} /* smoke_check_1() */
-
/* This test uses many POSIX things that are not available on
* Windows. We're using a check for fork(2) here as a proxy for
* all POSIX/Unix/Linux things until this test can be made
@@ -4113,38 +3850,38 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
int
main(int argc, char **argv)
{
- hbool_t setup = FALSE;
- hbool_t ici = FALSE;
+ hbool_t setup = FALSE;
+ hbool_t ici = FALSE;
unsigned nerrs = 0;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- int file_idx;
- int i;
- int mpi_size;
- int mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int file_idx;
+ int i;
+ int mpi_size;
+ int mpi_rank;
- if ( ! parse_flags(argc, argv, &setup, &ici, &file_idx, &mpi_size, FALSE) )
- exit(1); /* exit now if unable to parse flags */
+ if (!parse_flags(argc, argv, &setup, &ici, &file_idx, &mpi_size, FALSE))
+ exit(1); /* exit now if unable to parse flags */
- if ( setup ) { /* construct test files and exit */
+ if (setup) { /* construct test files and exit */
H5open();
HDfprintf(stdout, "Constructing test files: \n");
HDfflush(stdout);
i = 0;
- while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
+ while ((FILENAMES[i] != NULL) && (i < TEST_FILES_TO_CONSTRUCT)) {
HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
HDfflush(stdout);
construct_test_file(i);
- if ( pass ) {
+ if (pass) {
HDprintf("done.\n");
HDfflush(stdout);
-
- } else {
+ }
+ else {
HDprintf("failed.\n");
HDexit(1);
@@ -4154,14 +3891,14 @@ main(int argc, char **argv)
HDfprintf(stdout, "Test file construction complete.\n");
HDexit(0);
+ }
+ else if (ici) {
- } else if ( ici ) {
-
- if ( serial_insert_cache_image(file_idx, mpi_size) ) {
+ if (serial_insert_cache_image(file_idx, mpi_size)) {
HDexit(0);
-
- } else {
+ }
+ else {
HDfprintf(stderr, "\n\nCache image insertion failed.\n");
HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
@@ -4181,72 +3918,70 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0){
- HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
- mpi_rank);
+ if (H5dont_atexit() < 0) {
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDprintf("===================================\n");
HDprintf("Parallel metadata cache image tests\n");
HDprintf(" mpi_size = %d\n", mpi_size);
HDprintf("===================================\n");
}
- if ( mpi_size < 2 ) {
+ if (mpi_size < 2) {
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDprintf(" Need at least 2 processes. Exiting.\n");
}
goto finish;
}
- if ( mpi_rank == 0 ) { /* create test files */
+ if (mpi_rank == 0) { /* create test files */
- int child_status;
+ int child_status;
pid_t child_pid;
child_pid = fork();
- if ( child_pid == 0 ) { /* this is the child process */
+ if (child_pid == 0) { /* this is the child process */
/* fun and games to shut up the compiler */
- char param0[32] = "t_cache_image";
- char param1[32] = "setup";
- char * child_argv[] = {param0, param1, NULL};
+ char param0[32] = "t_cache_image";
+ char param1[32] = "setup";
+ char *child_argv[] = {param0, param1, NULL};
/* we may need to play with the path here */
- if ( execv("t_cache_image", child_argv) == -1 ) {
+ if (execv("t_cache_image", child_argv) == -1) {
- HDfprintf(stdout,
- "execl() of setup process failed. errno = %d(%s)\n",
- errno, strerror(errno));
+ HDfprintf(stdout, "execl() of setup process failed. errno = %d(%s)\n", errno,
+ strerror(errno));
HDexit(1);
}
-
- } else if ( child_pid != -1 ) {
+ }
+ else if (child_pid != -1) {
/* this is the parent process -- wait until child is done */
- if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
+ if (-1 == waitpid(child_pid, &child_status, WUNTRACED)) {
HDfprintf(stdout, "can't wait on setup process.\n");
-
- } else if ( ! WIFEXITED(child_status) ) {
+ }
+ else if (!WIFEXITED(child_status)) {
HDfprintf(stdout, "setup process hasn't exitied.\n");
-
- } else if ( WEXITSTATUS(child_status) != 0 ) {
+ }
+ else if (WEXITSTATUS(child_status) != 0) {
HDfprintf(stdout, "setup process reports failure.\n");
+ }
+ else {
- } else {
-
- HDfprintf(stdout,
- "testfile construction complete -- proceeding with tests.\n");
+ HDfprintf(stdout, "testfile construction complete -- proceeding with tests.\n");
}
- } else { /* fork failed */
+ }
+ else { /* fork failed */
HDfprintf(stdout, "can't create process to construct test file.\n");
}
@@ -4255,16 +3990,11 @@ main(int argc, char **argv)
/* can't start test until test files exist */
MPI_Barrier(MPI_COMM_WORLD);
-
- nerrs += verify_cache_image_RO(0,
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
+ nerrs += verify_cache_image_RO(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
#if 1
- nerrs += verify_cache_image_RO(1,
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
- nerrs += verify_cache_image_RW(0,
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
- nerrs += verify_cache_image_RW(1,
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
+ nerrs += verify_cache_image_RO(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
+ nerrs += verify_cache_image_RW(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
+ nerrs += verify_cache_image_RW(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
#endif
finish:
@@ -4274,12 +4004,11 @@ finish:
*/
MPI_Barrier(MPI_COMM_WORLD);
- if ( mpi_rank == 0 ) { /* only process 0 reports */
+ if (mpi_rank == 0) { /* only process 0 reports */
HDsleep(10);
HDprintf("===================================\n");
- if ( nerrs > 0 ) {
- HDprintf("***metadata cache image tests detected %d failures***\n",
- nerrs);
+ if (nerrs > 0) {
+ HDprintf("***metadata cache image tests detected %d failures***\n", nerrs);
}
else {
HDprintf("metadata cache image tests finished with no failures\n");
@@ -4296,7 +4025,7 @@ finish:
MPI_Finalize();
/* cannot just return (nerrs) because exit code is limited to 1byte */
- return(nerrs > 0);
+ return (nerrs > 0);
} /* main() */
#else /* H5_HAVE_FORK */
@@ -4309,4 +4038,3 @@ main(void)
} /* end main() */
#endif /* H5_HAVE_FORK */
-
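
/*
 * Illustrative sketch (not from the HDF5 sources): the rank-dependent
 * expectation that verify_cache_image_RO(), verify_cache_image_RW() and
 * smoke_check_1() above all assert under H5C_COLLECT_CACHE_STATS.  The
 * counters would come from cache_ptr in the real tests; plain ints are used
 * here so the sketch stands alone.
 */
static int
cache_image_stats_ok(int mpi_rank, int images_read, int images_loaded)
{
    int expected_reads = (mpi_rank == 0) ? 1 : 0; /* only rank 0 reads the image from the file */
    int expected_loads = 1;                       /* every rank receives and loads the image   */

    return (images_read == expected_reads) && (images_loaded == expected_loads);
}
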
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index e716f41..8b9eb36 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -22,21 +22,20 @@
#include "testphdf5.h"
static int mpi_size, mpi_rank;
-#define DSET_NAME "ExtendibleArray"
-#define CHUNK_SIZE 1000 /* #elements per chunk */
-#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
-#define CLOSE 1
-#define NO_CLOSE 0
+#define DSET_NAME "ExtendibleArray"
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
+#define CLOSE 1
+#define NO_CLOSE 0
static MPI_Offset
get_filesize(const char *filename)
{
int mpierr;
- MPI_File fd;
- MPI_Offset filesize;
+ MPI_File fd;
+ MPI_Offset filesize;
- mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY,
- MPI_INFO_NULL, &fd);
+ mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd);
VRFY((mpierr == MPI_SUCCESS), "");
mpierr = MPI_File_get_size(fd, &filesize);
@@ -45,21 +44,12 @@ get_filesize(const char *filename)
mpierr = MPI_File_close(&fd);
VRFY((mpierr == MPI_SUCCESS), "");
- return(filesize);
+ return (filesize);
}
-typedef enum write_pattern {
- none,
- sec_last,
- all
-} write_type;
-
-typedef enum access_ {
- write_all,
- open_only,
- extend_only
-} access_type;
+typedef enum write_pattern { none, sec_last, all } write_type;
+typedef enum access_ { write_all, open_only, extend_only } access_type;
/*
* This creates a dataset serially with chunks, each of CHUNK_SIZE
@@ -69,35 +59,35 @@ typedef enum access_ {
static void
create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
{
- hid_t file_id, dataset; /* handles */
- hid_t dataspace,memspace;
- hid_t cparms;
- hsize_t dims[1];
- hsize_t maxdims[1] = {H5S_UNLIMITED};
-
- hsize_t chunk_dims[1] ={CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
+ hid_t file_id, dataset; /* handles */
+ hid_t dataspace, memspace;
+ hid_t cparms;
+ hsize_t dims[1];
+ hsize_t maxdims[1] = {H5S_UNLIMITED};
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- long nchunks;
- herr_t hrc;
+ char buffer[CHUNK_SIZE];
+ long nchunks;
+ herr_t hrc;
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* Only MAINPROCESS should create the file. Others just wait. */
- if (MAINPROCESS){
- nchunks=chunk_factor*mpi_size;
- dims[0]=(hsize_t)(nchunks*CHUNK_SIZE);
+ if (MAINPROCESS) {
+ nchunks = chunk_factor * mpi_size;
+ dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
/* Create the data space with unlimited dimensions. */
- dataspace = H5Screate_simple (1, dims, maxdims);
+ dataspace = H5Screate_simple(1, dims, maxdims);
VRFY((dataspace >= 0), "");
memspace = H5Screate_simple(1, chunk_dims, NULL);
@@ -118,19 +108,20 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
VRFY((hrc >= 0), "");
/* Create a new dataset within the file using cparms creation properties. */
- dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ dataset =
+ H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
VRFY((dataset >= 0), "");
- if(write_pattern == sec_last) {
+ if (write_pattern == sec_last) {
HDmemset(buffer, 100, CHUNK_SIZE);
- count[0] = 1;
+ count[0] = 1;
stride[0] = 1;
- block[0] = chunk_dims[0];
- offset[0] = (hsize_t)(nchunks-2)*chunk_dims[0];
+ block[0] = chunk_dims[0];
+ offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
+ VRFY((hrc >= 0), "");
/* Write sec_last chunk */
hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
@@ -138,28 +129,27 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
} /* end if */
/* Close resources */
- hrc = H5Dclose (dataset);
+ hrc = H5Dclose(dataset);
VRFY((hrc >= 0), "");
dataset = -1;
- hrc = H5Sclose (dataspace);
+ hrc = H5Sclose(dataspace);
VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
+ hrc = H5Sclose(memspace);
VRFY((hrc >= 0), "");
- hrc = H5Pclose (cparms);
+ hrc = H5Pclose(cparms);
VRFY((hrc >= 0), "");
- hrc = H5Fclose (file_id);
+ hrc = H5Fclose(file_id);
VRFY((hrc >= 0), "");
file_id = -1;
/* verify file size */
- filesize = get_filesize(filename);
+ filesize = get_filesize(filename);
est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
-
}
/* Make sure all processes are done before exiting this routine. Otherwise,
@@ -170,7 +160,6 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
MPI_Barrier(MPI_COMM_WORLD);
}
-
/*
* This program performs three different types of parallel access. It writes on
* the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only
@@ -178,51 +167,52 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
* consistent with argument 'chunk_factor'.
*/
static void
-parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, hid_t *dataset)
+parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
+ hid_t *dataset)
{
/* HDF5 gubbins */
- hid_t memspace, dataspace; /* HDF5 file identifier */
- hid_t access_plist; /* HDF5 ID for file access property list */
- herr_t hrc; /* HDF5 return code */
- hsize_t size[1];
-
- hsize_t chunk_dims[1] ={CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
- hsize_t dims[1];
- hsize_t maxdims[1];
+ hid_t memspace, dataspace; /* HDF5 file identifier */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+ hsize_t size[1];
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ hsize_t dims[1];
+ hsize_t maxdims[1];
/* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- int i;
- long nchunks;
+ char buffer[CHUNK_SIZE];
+ int i;
+ long nchunks;
/* MPI Gubbins */
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
/* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- nchunks=chunk_factor*mpi_size;
+ nchunks = chunk_factor * mpi_size;
/* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
VRFY((hrc >= 0), "");
/* Open the file */
- if (*file_id<0){
+ if (*file_id < 0) {
*file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
VRFY((*file_id >= 0), "");
}
/* Open dataset*/
- if (*dataset<0){
+ if (*dataset < 0) {
*dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
VRFY((*dataset >= 0), "");
}
@@ -233,19 +223,19 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
- size[0] = (hsize_t)nchunks*CHUNK_SIZE;
+ size[0] = (hsize_t)nchunks * CHUNK_SIZE;
switch (action) {
/* all chunks are written by all the processes in an interleaved way*/
case write_all:
- HDmemset(buffer, mpi_rank+1, CHUNK_SIZE);
- count[0] = 1;
+ HDmemset(buffer, mpi_rank + 1, CHUNK_SIZE);
+ count[0] = 1;
stride[0] = 1;
- block[0] = chunk_dims[0];
- for (i=0; i<nchunks/mpi_size; i++) {
- offset[0] = (hsize_t)(i*mpi_size+mpi_rank)*chunk_dims[0];
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks / mpi_size; i++) {
+ offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
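
/*
 * Illustrative sketch (not from the HDF5 sources): the interleaved chunk
 * assignment used by the write_all branch above.  With nchunks equal to
 * chunk_factor * mpi_size, rank r writes chunks r, r + mpi_size,
 * r + 2 * mpi_size, and so on.  The parameter values below are examples only.
 */
#include <stdio.h>

int
main(void)
{
    int mpi_size = 4; /* example process count               */
    int nchunks  = 8; /* chunk_factor * mpi_size in the test */
    int r, i;

    for (r = 0; r < mpi_size; r++) {
        printf("rank %d writes chunks:", r);
        for (i = 0; i < nchunks / mpi_size; i++)
            printf(" %d", i * mpi_size + r);
        printf("\n");
    }
    return 0; /* rank 0 -> 0 4, rank 1 -> 1 5, rank 2 -> 2 6, rank 3 -> 3 7 */
}
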
@@ -282,10 +272,10 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
VRFY((hrc >= 0), "");
*dataset = -1;
- hrc = H5Sclose (dataspace);
+ hrc = H5Sclose(dataspace);
VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
+ hrc = H5Sclose(memspace);
VRFY((hrc >= 0), "");
hrc = H5Fclose(*file_id);
@@ -293,8 +283,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
*file_id = -1;
/* verify file size */
- filesize = get_filesize(filename);
- est_filesize = (MPI_Offset)nchunks*(MPI_Offset)CHUNK_SIZE*(MPI_Offset)sizeof(unsigned char);
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
/* Can close some plists */
@@ -317,45 +307,45 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
* interleaved pattern.
*/
static void
-verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose,
- hid_t *file_id, hid_t *dataset)
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
+ hid_t *dataset)
{
/* HDF5 gubbins */
- hid_t dataspace, memspace; /* HDF5 file identifier */
- hid_t access_plist; /* HDF5 ID for file access property list */
- herr_t hrc; /* HDF5 return code */
-
- hsize_t chunk_dims[1] ={CHUNK_SIZE};
- hsize_t count[1];
- hsize_t stride[1];
- hsize_t block[1];
- hsize_t offset[1]; /* Selection offset within dataspace */
+ hid_t dataspace, memspace; /* HDF5 file identifier */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNK_SIZE];
- int value, i;
- int index_l;
- long nchunks;
+ char buffer[CHUNK_SIZE];
+ int value, i;
+ int index_l;
+ long nchunks;
/* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- nchunks=chunk_factor*mpi_size;
+ nchunks = chunk_factor * mpi_size;
/* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
VRFY((hrc >= 0), "");
/* Open the file */
- if (*file_id<0){
+ if (*file_id < 0) {
*file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
VRFY((*file_id >= 0), "");
}
/* Open dataset*/
- if (*dataset<0){
+ if (*dataset < 0) {
*dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
VRFY((*dataset >= 0), "");
}
@@ -367,14 +357,14 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
VRFY((dataspace >= 0), "");
/* all processes check all chunks. */
- count[0] = 1;
+ count[0] = 1;
stride[0] = 1;
- block[0] = chunk_dims[0];
- for (i=0; i<nchunks; i++){
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks; i++) {
/* reset buffer values */
HDmemset(buffer, -1, CHUNK_SIZE);
- offset[0] = (hsize_t)i*chunk_dims[0];
+ offset[0] = (hsize_t)i * chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
@@ -386,13 +376,13 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
/* set expected value according the write pattern */
switch (write_pattern) {
case all:
- value = i%mpi_size + 1;
+ value = i % mpi_size + 1;
break;
case none:
value = 0;
break;
case sec_last:
- if (i==nchunks-2)
+ if (i == nchunks - 2)
value = 100;
else
value = 0;
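
/*
 * Illustrative sketch (not from the HDF5 sources): the expected byte value
 * for chunk i under each write pattern checked above.  The write_type enum is
 * repeated from the file so the sketch stands alone.
 */
typedef enum write_pattern { none, sec_last, all } write_type;

static int
expected_chunk_value(int i, long nchunks, int mpi_size, write_type pattern)
{
    switch (pattern) {
        case all:
            return i % mpi_size + 1; /* chunk i was filled by rank i % mpi_size with rank + 1 */
        case sec_last:
            return (i == nchunks - 2) ? 100 : 0; /* only the second-to-last chunk was written */
        case none:
        default:
            return 0; /* never written, so the default fill value of 0 reads back */
    }
}
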
@@ -406,10 +396,10 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
VRFY((buffer[index_l] == value), "data verification");
}
- hrc = H5Sclose (dataspace);
+ hrc = H5Sclose(dataspace);
VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
+ hrc = H5Sclose(memspace);
VRFY((hrc >= 0), "");
/* Can close some plists */
@@ -417,7 +407,7 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
VRFY((hrc >= 0), "");
/* Close up */
- if (vclose){
+ if (vclose) {
hrc = H5Dclose(*dataset);
VRFY((hrc >= 0), "");
*dataset = -1;
@@ -434,8 +424,6 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
MPI_Barrier(MPI_COMM_WORLD);
}
-
-
/*
* Test following possible scenarios,
* Case 1:
@@ -458,15 +446,15 @@ void
test_chunk_alloc(void)
{
const char *filename;
- hid_t file_id, dataset;
+ hid_t file_id, dataset;
file_id = dataset = -1;
/* Initialize MPI */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- filename = (const char*)GetTestParameters();
+ filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
HDprintf("Extend Chunked allocation test on file %s\n", filename);
@@ -495,5 +483,4 @@ test_chunk_alloc(void)
parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
-
}
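
/*
 * Illustrative sketch (not from the HDF5 sources): the lower bound that the
 * chunk allocation test above places on the file size.  CHUNK_SIZE and
 * CHUNK_FACTOR match the macros in t_chunk_alloc.c; mpi_size is an example
 * value.
 */
#include <stdio.h>

#define CHUNK_SIZE   1000
#define CHUNK_FACTOR 200

int
main(void)
{
    int       mpi_size     = 4; /* example process count */
    long      nchunks      = (long)CHUNK_FACTOR * mpi_size;
    long long est_filesize = (long long)nchunks * CHUNK_SIZE * (long long)sizeof(unsigned char);

    /* the test requires the size reported by MPI_File_get_size() to be at
     * least this large once every chunk has been allocated
     */
    printf("nchunks = %ld, est_filesize = %lld bytes\n", nchunks, est_filesize);
    return 0; /* prints: nchunks = 800, est_filesize = 800000 bytes */
}
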
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 740f78e..2a55ad1 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -16,26 +16,23 @@
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* some commonly used routines for collective chunk IO tests*/
-static void ccslab_set(int mpi_rank,int mpi_size,hsize_t start[],hsize_t count[],
- hsize_t stride[],hsize_t block[],int mode);
+static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[],
+ hsize_t block[], int mode);
-static void ccdataset_fill(hsize_t start[],hsize_t count[],
- hsize_t stride[],hsize_t block[],DATATYPE*dataset,
- int mem_selection);
+static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, int mem_selection);
-static void ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE*dataset);
+static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset);
-static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
- hsize_t block[], DATATYPE *dataset, DATATYPE *original,
- int mem_selection);
-
-static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
- int api_option, int file_selection, int mem_selection, int mode);
+static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, DATATYPE *original, int mem_selection);
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
/*-------------------------------------------------------------------------
* Function: coll_chunk1
@@ -88,7 +85,6 @@ coll_chunk1(void)
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
-
/*-------------------------------------------------------------------------
* Function: coll_chunk2
*
@@ -107,7 +103,7 @@ coll_chunk1(void)
*-------------------------------------------------------------------------
*/
- /* ------------------------------------------------------------------------
+/* ------------------------------------------------------------------------
* Descriptions for the selection: many disjoint selections inside one chunk
* Two dimensions,
*
@@ -140,7 +136,6 @@ coll_chunk2(void)
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
}
-
/*-------------------------------------------------------------------------
* Function: coll_chunk3
*
@@ -181,7 +176,7 @@ void
coll_chunk3(void)
{
const char *filename = GetTestParameters();
- int mpi_size;
+ int mpi_size;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
@@ -496,17 +491,17 @@ coll_chunk8(void)
void
coll_chunk9(void)
{
- const char *filename = GetTestParameters();
+ const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -548,28 +543,27 @@ coll_chunk9(void)
void
coll_chunk10(void)
{
- const char *filename = GetTestParameters();
+ const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
}
-
/*-------------------------------------------------------------------------
* Function: coll_chunktest
*
* Purpose: The real testing routine for regular selection of collective
chunking storage
testing both write and read,
- If anything fails, it may be read or write. There is no
- separation test between read and write.
+ If anything fails, it may be read or write. There is no
+ separation test between read and write.
*
* Return: Success: 0
*
@@ -590,572 +584,563 @@ coll_chunk10(void)
*/
static void
-coll_chunktest(const char* filename,
- int chunk_factor,
- int select_factor,
- int api_option,
- int file_selection,
- int mem_selection,
- int mode)
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
{
- hid_t file, dataset, file_dataspace, mem_dataspace;
- hid_t acc_plist,xfer_plist,crp_plist;
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
- hsize_t dims[RANK], chunk_dims[RANK];
- int* data_array1 = NULL;
- int* data_origin1 = NULL;
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int * data_array1 = NULL;
+ int * data_origin1 = NULL;
- hsize_t start[RANK],count[RANK],stride[RANK],block[RANK];
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- unsigned prop_value;
+ unsigned prop_value;
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- int mpi_size,mpi_rank;
+ int mpi_size, mpi_rank;
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ /* set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size);
+ dims[1] = SPACE_DIM2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+ /* to decrease the testing time, maintain bigger chunk size */
+ (chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+ VRFY((dataset >= 0), "dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY((status >= 0), "");
+
+ /*put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
- herr_t status;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
-
- /* set up MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
-
- /* Create the data space */
-
- acc_plist = create_faccess_plist(comm,info,facc_type);
- VRFY((acc_plist >= 0),"");
-
- file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
-
- /* setup dimensionality object */
- dims[0] = (hsize_t)(SPACE_DIM1*mpi_size);
- dims[1] = SPACE_DIM2;
-
- /* allocate memory for data buffer */
- data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
-
- /* set up the coords array selection */
- num_points = block[0] * block[1] * count[0] * count[1];
- coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
- point_set(start, count, stride, block, num_points, coords, mode);
-
- file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((file_dataspace >= 0), "file dataspace created succeeded");
-
- if(ALL != mem_selection) {
- mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
- }
- else {
- current_dims = num_points;
- mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((crp_plist >= 0),"");
-
- /* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)chunk_factor;
-
- /* to decrease the testing time, maintain bigger chunk size */
- (chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2);
- status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY((status >= 0),"chunk creation property list succeeded");
-
- dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
- file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
- VRFY((dataset >= 0),"dataset created succeeded");
-
- status = H5Pclose(crp_plist);
- VRFY((status >= 0), "");
-
- /*put some trivial data in the data array */
- ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
-
- MESG("data_array initialized");
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* set up the collective transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
- }
-
- switch(api_option){
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded");
- break;
-
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded ");
- break;
-
- case API_LINK_TRUE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
-
- case API_LINK_FALSE:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
-
- case API_MULTI_COLL:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
-
- case API_MULTI_IND:
- status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
-
- default:
- ;
- }
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if(facc_type == FACC_MPIO) {
- switch(api_option) {
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
case API_LINK_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_HARD:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
case API_LINK_TRUE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
case API_LINK_FALSE:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_COLL:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
case API_MULTI_IND:
- prop_value = H5D_XFER_COLL_CHUNK_DEF;
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
- break;
-
- default:
- ;
- }
- }
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
#endif
- /* write data collectively */
- status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
- VRFY((status >= 0),"dataset write succeeded");
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if(facc_type == FACC_MPIO) {
- switch(api_option){
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
case API_LINK_HARD:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
case API_MULTI_HARD:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
case API_LINK_TRUE:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
case API_LINK_FALSE:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
case API_MULTI_COLL:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
- break;
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
case API_MULTI_IND:
- status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
- break;
-
- default:
- ;
- }
- }
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
#endif
- status = H5Dclose(dataset);
- VRFY((status >= 0),"");
-
- status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
-
- status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"");
-
- status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"");
-
-
- status = H5Fclose(file);
- VRFY((status >= 0),"");
-
- if (data_array1) HDfree(data_array1);
-
- /* Use collective read to verify the correctness of collective write. */
-
- /* allocate memory for data buffer */
- data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
- /* allocate memory for data buffer */
- data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
-
- acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
-
- file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
-
- status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
-
- /* open the collective dataset*/
- dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
-
- /* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
-
- /* obtain the file and mem dataspace*/
- file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "");
-
- if (ALL != mem_selection) {
- mem_dataspace = H5Dget_space (dataset);
- VRFY((mem_dataspace >= 0), "");
- }
- else {
- current_dims = num_points;
- mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
- }
-
- switch (file_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- switch (mem_selection) {
- case HYPER:
- status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
- break;
-
- case POINT:
- if (num_points) {
- status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
- }
- else {
- status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
- }
- break;
-
- case ALL:
- status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
- break;
- }
-
- /* fill dataset with test data */
- ccdataset_fill(start, stride,count,block, data_origin1, mem_selection);
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
-
- status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
- }
-
- status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
- VRFY((status >=0),"dataset read succeeded");
-
- /* verify the read data with original expected data */
- status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
- if (status) nerrors++;
-
- status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
-
- /* close dataset collectively */
- status=H5Dclose(dataset);
- VRFY((status >= 0), "H5Dclose");
-
- /* release all IDs created */
- status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"H5Sclose");
-
- status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"H5Sclose");
-
- /* close the file collectively */
- status = H5Fclose(file);
- VRFY((status >= 0),"H5Fclose");
-
- /* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "");
-}
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "");
+ status = H5Fclose(file);
+ VRFY((status >= 0), "");
+
+ if (data_array1)
+ HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+ /* open the collective dataset*/
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* obtain the file and mem dataspace*/
+ file_dataspace = H5Dget_space(dataset);
+ VRFY((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY((mem_dataspace >= 0), "");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* fill dataset with test data */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
/* Set up the selection */
static void
-ccslab_set(int mpi_rank,
- int mpi_size,
- hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- int mode)
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
{
- switch (mode){
-
- case BYROW_CONT:
- /* Each process takes a slabs of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = SPACE_DIM1;
- count[1] = SPACE_DIM2;
- start[0] = (hsize_t)mpi_rank*count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = SPACE_DIM1/(stride[0]*block[0]);
- count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
- start[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_rank;
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTNONE:
- /* Each process takes a slabs of rows, there are
- no selections for the last process. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:SPACE_DIM1);
- count[1] = SPACE_DIM2;
- start[0] = (hsize_t)mpi_rank*count[0];
- start[1] = 0;
-
- break;
-
- case BYROW_SELECTUNBALANCE:
- /* The first one-third of the number of processes only
- select top half of the domain, The rest will select the bottom
- half of the domain. */
-
- block[0] = 1;
- count[0] = 2;
- stride[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_size/4+1;
- block[1] = SPACE_DIM2;
- count[1] = 1;
- start[1] = 0;
- stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = (hsize_t)mpi_rank;
- else start[0] = (hsize_t)(1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3));
- break;
-
- case BYROW_SELECTINCHUNK:
- /* Each process will only select one chunk */
-
- block[0] = 1;
- count[0] = 1;
- start[0] = (hsize_t)(mpi_rank*SPACE_DIM1);
- stride[0]= 1;
- block[1] = SPACE_DIM2;
- count[1] = 1;
- stride[1]= 1;
- start[1] = 0;
-
- break;
-
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_size;
- block[1] = SPACE_DIM2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
+ switch (mode) {
+
+ case BYROW_CONT:
+ /* Each process takes a slabs of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE_DIM1;
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = SPACE_DIM1 / (stride[0] * block[0]);
+ count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
+ start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+ /* Each process takes a slabs of rows, there are
+ no selections for the last process. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1);
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+ /* The first one-third of the number of processes only
+ select top half of the domain, The rest will select the bottom
+ half of the domain. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3));
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)(mpi_rank * SPACE_DIM1);
+ stride[0] = 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size;
+ block[1] = SPACE_DIM2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
}
- if (VERBOSE_MED){
- HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
}
}
-
/*
* Fill the dataset with trivial data for testing.
* Assume dimension rank is 2.
*/
static void
-ccdataset_fill(hsize_t start[],
- hsize_t stride[],
- hsize_t count[],
- hsize_t block[],
- DATATYPE * dataset,
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
int mem_selection)
{
DATATYPE *dataptr = dataset;
DATATYPE *tmptr;
- hsize_t i,j,k1,k2,k=0;
+ hsize_t i, j, k1, k2, k = 0;
/* put some trivial data in the data_array */
tmptr = dataptr;
@@ -1163,23 +1148,23 @@ ccdataset_fill(hsize_t start[],
through the pointer */
for (k1 = 0; k1 < count[0]; k1++) {
- for(i = 0; i < block[0]; i++) {
- for(k2 = 0; k2 < count[1]; k2++) {
- for(j = 0;j < block[1]; j++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
- if (ALL != mem_selection) {
- dataptr = tmptr + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
- }
- else {
- dataptr = tmptr + k;
- k++;
- }
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
- *dataptr = (DATATYPE)(k1+k2+i+j);
- }
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
}
- }
}
}
@@ -1187,83 +1172,75 @@ ccdataset_fill(hsize_t start[],
* Print the first block of the content of the dataset.
*/
static void
-ccdataset_print(hsize_t start[],
- hsize_t block[],
- DATATYPE * dataset)
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
HDprintf("Print only the first block of the dataset\n");
HDprintf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
- HDprintf("%3lu ", (unsigned long)(start[1]+j));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
}
HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- HDprintf("%03d ", *dataptr++);
- }
- HDprintf("\n");
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
-
/*
* Print the content of the dataset.
*/
static int
-ccdataset_vrfy(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- DATATYPE *dataset,
- DATATYPE *original,
- int mem_selection)
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
{
- hsize_t i, j,k1,k2,k=0;
- int vrfyerrs;
- DATATYPE *dataptr,*oriptr;
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
- HDprintf("dataset_vrfy dumping:::\n");
- HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- HDprintf("original values:\n");
- ccdataset_print(start, block, original);
- HDprintf("compared values:\n");
- ccdataset_print(start, block, dataset);
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
}
vrfyerrs = 0;
- for (k1=0;k1<count[0];k1++) {
- for(i=0;i<block[0];i++) {
- for(k2=0; k2<count[1];k2++) {
- for(j=0;j<block[1];j++) {
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
if (ALL != mem_selection) {
- dataptr = dataset + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
- oriptr = original + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
}
else {
dataptr = dataset + k;
- oriptr = original + k;
+ oriptr = original + k;
k++;
}
- if (*dataptr != *oriptr){
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- *(oriptr), *(dataptr));
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
}
}
}
@@ -1271,8 +1248,8 @@ ccdataset_vrfy(hsize_t start[],
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if (vrfyerrs)
- HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
- return(vrfyerrs);
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
}
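The heart of coll_chunktest() above is the dataset transfer property list: H5Pset_dxpl_mpio() requests collective MPI-IO, and the H5Pset_dxpl_mpio_chunk_opt_num()/H5Pset_dxpl_mpio_chunk_opt_ratio() calls steer the library between the link-chunk and multi-chunk code paths. The following is a small, hedged sketch of the API_MULTI_COLL setup pulled out into a helper; the helper name is illustrative, and the values 8 and 50 are simply the ones the test uses (API_MULTI_IND swaps the ratio to 100 to push per-chunk I/O to independent mode).

#include "hdf5.h"

/* Sketch: build a transfer property list mirroring the API_MULTI_COLL
 * branch above -- collective MPI-IO, forced onto the multi-chunk path. */
static hid_t
make_multichunk_coll_dxpl(void)
{
    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    /* Request collective MPI-IO for the transfer */
    H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

    /* Chunk-number threshold of 8: per the test's comment, this makes
     * sure the multi-chunk I/O path is taken. */
    H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8);

    /* Ratio of 50 keeps per-chunk I/O collective in this test; the
     * API_MULTI_IND case uses 100 to drive it to independent I/O. */
    H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);

    return xfer_plist;
}

The returned identifier is then passed as the transfer property list to H5Dwrite()/H5Dread(), exactly as the xfer_plist argument is used in the collective write and read above, and released afterwards with H5Pclose().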
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index d4aaa2e..4439b0d 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -27,17 +27,17 @@
* an if (mpi_rank == 0) check.
*/
#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
-#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
-#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
-#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
-#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
+#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
+#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
/*
* A test for issue HDFFV-10501. A parallel hang was reported which occurred
@@ -53,27 +53,29 @@
* can simply be removed and the address used for the read/write can be set to an
* arbitrary number (0 was chosen).
*/
-void test_partial_no_selection_coll_md_read(void)
+void
+test_partial_no_selection_coll_md_read(void)
{
const char *filename;
- hsize_t *dataset_dims = NULL;
+ hsize_t * dataset_dims = NULL;
hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t sel_dims[1];
- hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = { PARTIAL_NO_SELECTION_Y_DIM_SCALE, PARTIAL_NO_SELECTION_X_DIM_SCALE };
+ hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE,
+ PARTIAL_NO_SELECTION_X_DIM_SCALE};
hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
hid_t fspace_id = H5I_INVALID_HID;
hid_t mspace_id = H5I_INVALID_HID;
int mpi_rank, mpi_size;
- void *data = NULL;
- void *read_buf = NULL;
+ void * data = NULL;
+ void * read_buf = NULL;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -96,8 +98,8 @@ void test_partial_no_selection_coll_md_read(void)
dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
VRFY((dataset_dims != NULL), "malloc succeeded");
- dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
- dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
max_dataset_dims[0] = H5S_UNLIMITED;
max_dataset_dims[1] = H5S_UNLIMITED;
@@ -110,9 +112,11 @@ void test_partial_no_selection_coll_md_read(void)
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
- VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+ VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
- dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
/*
@@ -120,23 +124,25 @@ void test_partial_no_selection_coll_md_read(void)
*
* The ranks will write rows across the dataset.
*/
- start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
- start[1] = 0;
+ start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
+ start[1] = 0;
stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
- count[0] = 1;
- count[1] = (hsize_t)mpi_size;
- block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
- block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+ count[0] = 1;
+ count[1] = (hsize_t)mpi_size;
+ block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
VRFY((data != NULL), "calloc succeeded");
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
@@ -156,9 +162,11 @@ void test_partial_no_selection_coll_md_read(void)
* the particular code path where the issue lies and we don't
* want the library doing multi-chunk I/O behind our backs.
*/
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
- read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
VRFY((read_buf != NULL), "malloc succeeded");
/*
@@ -172,13 +180,17 @@ void test_partial_no_selection_coll_md_read(void)
/*
* Finally have each rank read their section of data back from the dataset.
*/
- VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
/*
* Check data integrity just to be sure.
*/
if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
- VRFY((!HDmemcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
+ VRFY((!HDmemcmp(data, read_buf,
+ count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int))),
+ "memcmp succeeded");
}
if (dataset_dims) {
@@ -218,40 +230,35 @@ void test_partial_no_selection_coll_md_read(void)
* major: Internal error (too specific to document in detail)
* minor: Some MPI function failed
* #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
- *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) failed
- *MPIR_Bcast_impl(1452).............:
- *MPIR_Bcast(1476)..................:
+ *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006)
+ *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................:
*MPIR_Bcast_intra(1249)............:
*MPIR_SMP_Bcast(1088)..............:
*MPIR_Bcast_binomial(239)..........:
- *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer size is 18
- * major: Internal error (too specific to document in detail)
- * minor: MPI Error String
+ *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer
+ *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String
*
*/
-void test_multi_chunk_io_addrmap_issue(void)
+void
+test_multi_chunk_io_addrmap_issue(void)
{
const char *filename;
- hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
- hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
- hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
- hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t space_id = H5I_INVALID_HID;
- void *read_buf = NULL;
- int mpi_rank;
- int data[5][5] = { {0, 1, 2, 3, 4},
- {0, 1, 2, 3, 4},
- {0, 1, 2, 3, 4},
- {0, 1, 2, 3, 4},
- {0, 1, 2, 3, 4} };
+ hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
+ hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
+ hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void * read_buf = NULL;
+ int mpi_rank;
+ int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}};
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -276,7 +283,8 @@ void test_multi_chunk_io_addrmap_issue(void)
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
- VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+ VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
@@ -285,9 +293,10 @@ void test_multi_chunk_io_addrmap_issue(void)
VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
- start[1] = 0;
+ start[1] = 0;
stride[0] = stride[1] = 1;
count[0] = count[1] = 5;
block[0] = block[1] = 1;
@@ -297,7 +306,8 @@ void test_multi_chunk_io_addrmap_issue(void)
else
start[0] = 5;
- VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
if (mpi_rank != 0)
VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
@@ -358,31 +368,31 @@ void test_multi_chunk_io_addrmap_issue(void)
*MPIR_Bcast(1476)........:
*MPIR_Bcast_intra(1249)..:
*MPIR_SMP_Bcast(1088)....:
- *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received 2096 but expected 320000
- * major: Internal error (too specific to document in detail)
- * minor: MPI Error String
+ *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received
+ *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String
*/
-void test_link_chunk_io_sort_chunk_issue(void)
+void
+test_link_chunk_io_sort_chunk_issue(void)
{
const char *filename;
- hsize_t *dataset_dims = NULL;
- hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t sel_dims[1];
- hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = { LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS };
- hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dset_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t fspace_id = H5I_INVALID_HID;
- hid_t mspace_id = H5I_INVALID_HID;
- int mpi_rank, mpi_size;
- void *data = NULL;
- void *read_buf = NULL;
+ hsize_t * dataset_dims = NULL;
+ hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = {LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS};
+ hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void * data = NULL;
+ void * read_buf = NULL;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -405,7 +415,8 @@ void test_link_chunk_io_sort_chunk_issue(void)
dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
VRFY((dataset_dims != NULL), "malloc succeeded");
- dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
+ dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size *
+ (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
max_dataset_dims[0] = H5S_UNLIMITED;
fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
@@ -417,9 +428,11 @@ void test_link_chunk_io_sort_chunk_issue(void)
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
- VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+ VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
- dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
/*
@@ -428,11 +441,12 @@ void test_link_chunk_io_sort_chunk_issue(void)
* The ranks will write rows across the dataset.
*/
stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
- count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
- start[0] = count[0] * (hsize_t)mpi_rank;
- block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+ count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
+ start[0] = count[0] * (hsize_t)mpi_rank;
+ block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
@@ -459,12 +473,14 @@ void test_link_chunk_io_sort_chunk_issue(void)
* the particular code path where the issue lies and we don't
* want the library doing multi-chunk I/O behind our backs.
*/
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
read_buf = HDmalloc(count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
VRFY((read_buf != NULL), "malloc succeeded");
- VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
@@ -479,7 +495,8 @@ void test_link_chunk_io_sort_chunk_issue(void)
/*
* Finally have each rank read their section of data back from the dataset.
*/
- VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
if (dataset_dims) {
HDfree(dataset_dims);
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 13f9e89..ec1ecf7 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -42,131 +42,128 @@
* ZCOL same as BYCOL except process 0 gets 0 columns
*/
static void
-slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
- hsize_t stride[], hsize_t block[], int mode)
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
{
switch (mode) {
- case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank * block[0];
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set BYROW\n");
- break;
- case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)(dim1 / mpi_size);
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank * block[1];
- if (VERBOSE_MED)
- HDprintf("slab_set BYCOL\n");
- break;
- case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
- block[1] = (hsize_t)dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set ZROW\n");
- break;
- case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
- if (VERBOSE_MED)
- HDprintf("slab_set ZCOL\n");
- break;
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- HDprintf("unknown slab_set mode (%d)\n", mode);
- block[0] = (hsize_t)dim0;
- block[1] = (hsize_t)dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
- if (VERBOSE_MED)
- HDprintf("slab_set wholeset\n");
- break;
+ case BYROW:
+ /* Each process takes a slabs of rows. */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+ /* Similar to BYROW except process 0 gets 0 row */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
}
if (VERBOSE_MED) {
- HDprintf(
- "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long) start[0], (unsigned long) start[1],
- (unsigned long) count[0], (unsigned long) count[1],
- (unsigned long) stride[0], (unsigned long) stride[1],
- (unsigned long) block[0], (unsigned long) block[1],
- (unsigned long) (block[0] * block[1] * count[0] * count[1]));
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
}
}
/*
* Setup the coordinates for point selection.
*/
-void point_set(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- size_t num_points,
- hsize_t coords[],
- int order)
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
{
- hsize_t i,j, k = 0, m ,n, s1 ,s2;
+ hsize_t i, j, k = 0, m, n, s1, s2;
HDcompile_assert(RANK == 2);
- if(OUT_OF_ORDER == order)
+ if (OUT_OF_ORDER == order)
k = (num_points * RANK) - 1;
- else if(IN_ORDER == order)
+ else if (IN_ORDER == order)
k = 0;
s1 = start[0];
s2 = start[1];
- for(i = 0 ; i < count[0]; i++)
- for(j = 0 ; j < count[1]; j++)
- for(m = 0 ; m < block[0]; m++)
- for(n = 0 ; n < block[1]; n++)
- if(OUT_OF_ORDER == order) {
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
coords[k--] = s2 + (stride[1] * j) + n;
coords[k--] = s1 + (stride[0] * i) + m;
}
- else if(IN_ORDER == order) {
+ else if (IN_ORDER == order) {
coords[k++] = s1 + stride[0] * i + m;
coords[k++] = s2 + stride[1] * j + n;
}
- if(VERBOSE_MED) {
- HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
- for(i = 0; i < num_points ; i++) {
+ for (i = 0; i < num_points; i++) {
HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
@@ -178,92 +175,90 @@ void point_set(hsize_t start[],
* Assume dimension rank is 2 and data is stored contiguous.
*/
static void
-dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* put some trivial data in the data_array */
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
}
}
-
/*
* Print the content of the dataset.
*/
static void
-dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
HDprintf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
- HDprintf("%3lu ", (unsigned long)(start[1]+j));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
}
HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- HDprintf("%03d ", *dataptr++);
- }
- HDprintf("\n");
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
-
/*
* Print the content of the dataset.
*/
int
-dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original)
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original)
{
hsize_t i, j;
- int vrfyerrs;
+ int vrfyerrs;
/* print it if VERBOSE_MED */
- if(VERBOSE_MED) {
- HDprintf("dataset_vrfy dumping:::\n");
- HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- HDprintf("original values:\n");
- dataset_print(start, block, original);
- HDprintf("compared values:\n");
- dataset_print(start, block, dataset);
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
- *(original), *(dataset));
- }
- dataset++;
- original++;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+ (unsigned long)(j + start[1]), *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
}
}
- }
- if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
- if(vrfyerrs)
- HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
- return(vrfyerrs);
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
}
-
/*
* Part 1.a--Independent read/write for fixed dimension datasets.
*/
@@ -279,36 +274,36 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
void
dataset_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* ----------------------------------------
@@ -326,7 +321,6 @@ dataset_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* ---------------------------------------------
* Define the dimensions of the overall datasets
* and the slabs local to the MPI process.
@@ -334,21 +328,17 @@ dataset_writeInd(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
/*
* To test the independent orders of writes between processes, all
* even number processes write to dataset1 first, then dataset2.
@@ -363,43 +353,40 @@ dataset_writeInd(void)
MESG("data_array initialized");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to write with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeInd by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeInd by some with zero row");
-if((mpi_rank/2)*2 != mpi_rank){
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
-}
+ if ((mpi_rank / 2) * 2 != mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+ }
#ifdef BARRIER_CHECKS
-MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
/* release dataspace ID */
@@ -418,44 +405,45 @@ MPI_Barrier(MPI_COMM_WORLD);
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/* Example of using the parallel HDF5 library to read a dataset */
void
dataset_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* setup file access template */
@@ -478,40 +466,39 @@ dataset_readInd(void)
dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
VRFY((dataset2 >= 0), "");
-
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* close dataset collectively */
ret = H5Dclose(dataset1);
@@ -526,11 +513,12 @@ dataset_readInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
-
/*
* Part 1.b--Collective read/write for fixed dimension datasets.
*/
@@ -547,48 +535,48 @@ dataset_readInd(void)
void
dataset_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
- hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
num_points = (size_t)dim1;
- coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -606,7 +594,6 @@ dataset_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------
* Define the dimensions of the overall datasets
* and create the dataset
@@ -614,17 +601,16 @@ dataset_writeAll(void)
/* setup 2-D dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
datatype = H5Tcopy(H5T_NATIVE_INT);
- ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
VRFY((ret >= 0), "H5Tset_order succeeded");
dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -663,54 +649,51 @@ dataset_writeAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll by Row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* setup dimensions again to writeAll with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
/* release all temporary handles. */
@@ -726,59 +709,56 @@ dataset_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to writeAll with zero columns for process 0 */
- if(VERBOSE_MED)
- HDprintf("writeAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero col");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
/* release all temporary handles. */
@@ -788,16 +768,15 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset3);
+ file_dataspace = H5Dget_space(dataset3);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
@@ -805,42 +784,39 @@ dataset_writeAll(void)
} /* end else */
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* release all temporary handles. */
@@ -854,11 +830,11 @@ dataset_writeAll(void)
/* Additionally, these are in a scalar dataspace */
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset4);
+ file_dataspace = H5Dget_space(dataset4);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(file_dataspace);
@@ -868,9 +844,9 @@ dataset_writeAll(void)
/* create a memory dataspace independently */
mem_dataspace = H5Screate(H5S_SCALAR);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(mem_dataspace);
@@ -880,31 +856,29 @@ dataset_writeAll(void)
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* release all temporary handles. */
@@ -912,55 +886,54 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- block[0] = 1;
- block[1] = (hsize_t)dim1;
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* Dataset5: point selection in File - Hyperslab selection in Memory*/
/* create a file dataspace independently */
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset5);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset5);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- start[0] = 0;
- start[1] = 0;
- mem_dataspace = H5Dget_space (dataset5);
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
/* release all temporary handles. */
@@ -970,35 +943,34 @@ dataset_writeAll(void)
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
/* release all temporary handles. */
@@ -1008,34 +980,33 @@ dataset_writeAll(void)
/* Dataset7: point selection in File - All selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset7);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset7);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- current_dims = num_points;
- mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
ret = H5Sselect_all(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
/* release all temporary handles. */
@@ -1065,8 +1036,10 @@ dataset_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
}
/*
@@ -1081,47 +1054,47 @@ dataset_writeAll(void)
void
dataset_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- int i,j,k;
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ int i, j, k;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
num_points = (size_t)dim1;
- coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1132,14 +1105,13 @@ dataset_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------
* Open the datasets in it
* ------------------------- */
@@ -1167,62 +1139,61 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero columns for process 0 */
- if(VERBOSE_MED)
- HDprintf("readAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero col");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
/* Could have used them for dataset2 but it is cleaner */
@@ -1235,219 +1206,221 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero rows for process 0 */
- if(VERBOSE_MED)
- HDprintf("readAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero row");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- if(data_origin1) free(data_origin1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ if (data_origin1)
+ free(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
- block[0] = 1;
- block[1] = (hsize_t)dim1;
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
stride[1] = (hsize_t)dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* Dataset5: point selection in memory - Hyperslab selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset5);
+ file_dataspace = H5Dget_space(dataset5);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset5);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset6 succeeded");
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ if (data_array1)
+ free(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset7);
+ file_dataspace = H5Dget_space(dataset7);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_all(file_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
num_points = (size_t)(dim0 * dim1);
- k=0;
- for (i=0 ; i<dim0; i++) {
- for (j=0 ; j<dim1; j++) {
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ for (j = 0; j < dim1; j++) {
coords[k++] = (hsize_t)i;
coords[k++] = (hsize_t)j;
}
}
- mem_dataspace = H5Dget_space (dataset7);
+ mem_dataspace = H5Dget_space(dataset7);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset7 succeeded");
- start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
start[1] = 0;
- ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
- if(ret) nerrors++;
+ ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+ data_origin1);
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
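/*
 * [Illustrative sketch -- not part of the diff above.] The dataset5/6/7 hunks
 * combine H5Sselect_elements point selections with hyperslab or "all"
 * selections. A stripped-down, standalone version of that pattern, assuming
 * hdf5.h is included and `dset` is an open 2-D integer dataset of at least
 * 2 x 2 elements:
 */
static void read_four_points(hid_t dset, int buf[4])
{
    /* (row, col) coordinate pairs, flattened the way H5Sselect_elements expects */
    hsize_t coords[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    hsize_t npts         = 4;
    hid_t   fspace       = H5Dget_space(dset);                /* point selection lives in the file space */
    hid_t   mspace       = H5Screate_simple(1, &npts, NULL);  /* contiguous 4-element memory buffer */

    H5Sselect_elements(fspace, H5S_SELECT_SET, 4, &coords[0][0]);
    H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf); /* independent read, for brevity */

    H5Sclose(mspace);
    H5Sclose(fspace);
}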
@@ -1472,12 +1445,14 @@ dataset_readAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
-
/*
* Part 2--Independent read/write for extendible datasets.
*/
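/*
 * [Illustrative sketch -- not part of the diff.] The dataset_readAll hunks
 * above all follow the same shape: select a per-rank hyperslab in the file
 * space, build a matching memory space, and read with a collective transfer
 * property list. A minimal, self-contained version of that pattern (the
 * helper name and the 1-row-per-rank decomposition are assumptions, not the
 * test's slab_set logic):
 */
#include <mpi.h>
#include <hdf5.h>

static herr_t read_my_row(hid_t dset, int mpi_rank, hsize_t ncols, int *buf)
{
    hsize_t start[2] = {(hsize_t)mpi_rank, 0}; /* each rank reads one full row */
    hsize_t count[2] = {1, ncols};
    hid_t   fspace   = H5Dget_space(dset);
    hid_t   mspace   = H5Screate_simple(2, count, NULL);
    hid_t   dxpl     = H5Pcreate(H5P_DATASET_XFER);
    herr_t  ret;

    /* NULL stride/block default to 1; file must have been opened with an MPI-IO fapl */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); /* every rank must participate in H5Dread */
    ret = H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    H5Pclose(dxpl);
    H5Sclose(mspace);
    H5Sclose(fspace);
    return ret;
}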
@@ -1493,45 +1468,44 @@ dataset_readAll(void)
void
extend_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1541,22 +1515,22 @@ extend_writeInd(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -1566,14 +1540,13 @@ extend_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -1582,7 +1555,7 @@ extend_writeInd(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (RANK, dims, max_dims);
+ sid = H5Screate_simple(RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -1597,8 +1570,6 @@ extend_writeInd(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -1608,37 +1579,35 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -1648,13 +1617,13 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
@@ -1663,14 +1632,13 @@ extend_writeInd(void)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -1680,18 +1648,17 @@ extend_writeInd(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1700,7 +1667,6 @@ extend_writeInd(void)
ret = H5Sclose(mem_dataspace);
VRFY((ret >= 0), "H5Sclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -1711,7 +1677,8 @@ extend_writeInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
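/*
 * [Illustrative sketch -- not part of the diff.] create_faccess_plist() is a
 * helper from the parallel test harness; for FACC_MPIO it amounts, roughly
 * and omitting the harness's extra settings, to an MPI-IO file access
 * property list built like this (hdf5.h and mpi.h assumed included):
 */
static hid_t make_mpio_fapl(MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    if (fapl < 0)
        return -1;
    if (H5Pset_fapl_mpio(fapl, comm, info) < 0) { /* select the MPI-IO file driver */
        H5Pclose(fapl);
        return -1;
    }
    return fapl;
}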
/*
@@ -1724,30 +1691,30 @@ void
extend_writeInd2(void)
{
const char *filename;
- hid_t fid; /* HDF5 file ID */
- hid_t fapl; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size=10; /* Original dataset dim size */
- hsize_t new_size=20; /* Extended dataset dim size */
- hsize_t one=1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
- int written[10], /* Data to write */
- retrieved[10]; /* Data read in */
- int mpi_size, mpi_rank; /* MPI settings */
- int i; /* Local index variable */
- herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size = 10; /* Original dataset dim size */
+ hsize_t new_size = 20; /* Extended dataset dim size */
+ hsize_t one = 1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test #2 on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* -------------------
* START AN HDF5 FILE
@@ -1764,7 +1731,6 @@ extend_writeInd2(void)
ret = H5Pclose(fapl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
@@ -1776,7 +1742,7 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* setup dimensionality object */
- fs = H5Screate_simple (1, &orig_size, &max_size);
+ fs = H5Screate_simple(1, &orig_size, &max_size);
VRFY((fs >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -1787,7 +1753,6 @@ extend_writeInd2(void)
ret = H5Pclose(dcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* -------------------------
* Test writing to dataset
* -------------------------*/
@@ -1796,13 +1761,13 @@ extend_writeInd2(void)
VRFY((ms >= 0), "H5Screate_simple succeeded");
/* put some trivial data in the data_array */
- for(i = 0; i < (int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
written[i] = i;
MESG("data array initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset zero: ");
- for(i = 0; i < (int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", written[i]);
+ if (VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
HDprintf("\n");
}
ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
@@ -1813,16 +1778,16 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset zero: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", retrieved[i]);
+ if (VERBOSE_MED) {
+ MESG("read at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
HDprintf("\n");
}
@@ -1840,13 +1805,13 @@ extend_writeInd2(void)
* Write to the second half of the dataset
* -------------------------*/
H5_CHECK_OVERFLOW(orig_size, hsize_t, int);
- for (i=0; i<(int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
written[i] = (int)orig_size + i;
MESG("data array re-initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", written[i]);
+ if (VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
HDprintf("\n");
}
ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
@@ -1859,20 +1824,19 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- HDprintf("%s%d", i?", ":"", retrieved[i]);
+ if (VERBOSE_MED) {
+ MESG("read at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
HDprintf("\n");
}
-
/* Close dataset collectively */
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose succeeded");
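/*
 * [Illustrative sketch -- not part of the diff.] The extend_writeInd2 hunks
 * above grow a 1-D chunked dataset and then write past the original bound.
 * The essential sequence, assuming `dset` was created with an H5S_UNLIMITED
 * maximum dimension and hdf5.h is included, is: extend, re-acquire the file
 * space, select the new region, write.
 */
static herr_t append_block(hid_t dset, hsize_t old_size, hsize_t nelem, const int *buf)
{
    hsize_t new_size = old_size + nelem;
    hid_t   fspace, mspace;
    herr_t  ret;

    if (H5Dset_extent(dset, &new_size) < 0) /* collective in parallel HDF5 */
        return -1;
    fspace = H5Dget_space(dset);            /* spaces obtained earlier do not see the new extent */
    mspace = H5Screate_simple(1, &nelem, NULL);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, &old_size, NULL, &nelem, NULL);
    ret = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

    H5Sclose(mspace);
    H5Sclose(fspace);
    return ret;
}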
@@ -1886,41 +1850,41 @@ extend_writeInd2(void)
void
extend_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1931,7 +1895,7 @@ extend_readInd(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -1951,7 +1915,7 @@ extend_readInd(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
@@ -1963,72 +1927,70 @@ extend_readInd(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2039,14 +2001,16 @@ extend_readInd(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
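/*
 * [Illustrative sketch -- not part of the diff.] Several hunks above wrap an
 * intentionally failing H5Dwrite in H5Eget_auto2/H5Eset_auto2 so the expected
 * error does not clutter the test output. The idiom in isolation (HDprintf is
 * the test suite's printf wrapper):
 */
static void expect_write_failure(hid_t dset, hid_t mspace, hid_t fspace, const int *buf)
{
    H5E_auto2_t saved_func;
    void *      saved_data;
    herr_t      status;

    H5Eget_auto2(H5E_DEFAULT, &saved_func, &saved_data); /* remember the current handler */
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);               /* silence automatic error printing */

    status = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

    H5Eset_auto2(H5E_DEFAULT, saved_func, saved_data);   /* restore error reporting */
    if (status >= 0)
        HDprintf("unexpected success: write beyond the current extent should fail\n");
}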
/*
@@ -2064,46 +2028,45 @@ extend_readInd(void)
void
extend_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -2113,22 +2076,22 @@ extend_writeAll(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -2138,14 +2101,13 @@ extend_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2154,7 +2116,7 @@ extend_writeAll(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (RANK, dims, max_dims);
+ sid = H5Screate_simple(RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2169,8 +2131,6 @@ extend_writeAll(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -2180,41 +2140,39 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2222,7 +2180,6 @@ extend_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -2232,40 +2189,38 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
/* Temporary turn off auto error reporting */
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -2275,18 +2230,17 @@ extend_writeAll(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2297,7 +2251,6 @@ extend_writeAll(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2308,49 +2261,50 @@ extend_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/* Example of using the parallel HDF5 library to read an extendible dataset */
void
extend_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2361,7 +2315,7 @@ extend_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -2381,7 +2335,7 @@ extend_readAll(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
@@ -2393,95 +2347,91 @@ extend_readAll(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
H5Pclose(xfer_plist);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2493,14 +2443,16 @@ extend_readAll(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
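/*
 * [Illustrative sketch -- not part of the diff.] Both extend_writeInd and
 * extend_writeAll shrink the metadata cache on the file access property list
 * before creating the file, to provoke cache collisions during chunked raw
 * data I/O. Only mdc_nelmts is changed; the raw-data chunk cache settings are
 * read back and passed through unchanged:
 */
static void shrink_metadata_cache(hid_t fapl)
{
    int    mdc_nelmts;
    size_t rdcc_nslots, rdcc_nbytes;
    double rdcc_w0;

    if (H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nslots, &rdcc_nbytes, &rdcc_w0) >= 0) {
        mdc_nelmts = 4; /* tiny metadata cache, mirroring the test's intent */
        H5Pset_cache(fapl, mdc_nelmts, rdcc_nslots, rdcc_nbytes, rdcc_w0);
    }
}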
/*
@@ -2511,49 +2463,49 @@ extend_readAll(void)
void
compress_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
- int rank=1; /* Dataspace rank */
- hsize_t dim=(hsize_t)dim0; /* Dataspace dimensions */
- unsigned u; /* Local index variable */
- unsigned chunk_opts; /* Chunk options */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank = 1; /* Dataspace rank */
+ hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- DATATYPE *data_read = NULL; /* data buffer */
- DATATYPE *data_orig = NULL; /* expected data buffer */
+ DATATYPE * data_read = NULL; /* data buffer */
+ DATATYPE * data_orig = NULL; /* expected data buffer */
const char *filename;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Collective chunked dataset read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
/* Retrieve MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
/* Allocate data buffer */
- data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded");
- data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_read != NULL), "data_array1 HDmalloc succeeded");
/* Initialize data buffers */
- for(u=0; u<dim;u++)
- data_orig[u]=(DATATYPE)u;
+ for (u = 0; u < dim; u++)
+ data_orig[u] = (DATATYPE)u;
/* Run test both with and without filters disabled on partial chunks */
- for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
- disable_partial_chunk_filters++) {
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
/* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
+ if (mpi_rank == 0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
/* Create the file */
fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -2568,18 +2520,18 @@ compress_readAll(void)
/* Use eight chunks */
chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* Set chunk options appropriately */
- if(disable_partial_chunk_filters) {
+ if (disable_partial_chunk_filters) {
ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
- VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+ VRFY((ret >= 0), "H5Pget_chunk_opts succeeded");
chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
ret = H5Pset_chunk_opts(dcpl, chunk_opts);
- VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ VRFY((ret >= 0), "H5Pset_chunk_opts succeeded");
} /* end if */
ret = H5Pset_deflate(dcpl, 9);
@@ -2590,7 +2542,8 @@ compress_readAll(void)
VRFY((dataspace > 0), "H5Screate_simple succeeded");
/* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ dataset =
+ H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), "H5Dcreate2 succeeded");
/* Write compressed data */
@@ -2612,49 +2565,47 @@ compress_readAll(void)
MPI_Barrier(comm);
/* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
+ * OPEN AN HDF5 FILE
+ * -------------------*/
/* setup file access template */
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid > 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* Open dataset with compressed chunks */
dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
VRFY((dataset > 0), "H5Dopen2 succeeded");
/* Try reading & writing data */
- if(dataset>0) {
+ if (dataset > 0) {
/* Create dataset transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist > 0), "H5Pcreate succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try reading the data */
ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dread succeeded");
/* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ for (u = 0; u < dim; u++)
+ if (data_orig[u] != data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+ (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
nerrors++;
}
@@ -2675,8 +2626,10 @@ compress_readAll(void)
} /* end for */
/* release data buffers */
- if(data_read) HDfree(data_read);
- if(data_orig) HDfree(data_orig);
+ if (data_read)
+ HDfree(data_read);
+ if (data_orig)
+ HDfree(data_orig);
}
#endif /* H5_HAVE_FILTER_DEFLATE */
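For readers tracking the compress_readAll() changes above: the pattern under test is that one rank serially creates a chunked, deflate-compressed dataset, after which every rank reopens the file through the MPI-IO driver and reads it back with a collective transfer property list. Below is a minimal, self-contained sketch of just that collective read path, assuming a parallel (MPI-enabled) HDF5 build; the file name "example.h5" is a placeholder, the dataset name mirrors the one the test creates, and error checking is omitted for brevity.

#include "hdf5.h"
#include "mpi.h"
#include <stdlib.h>

int
main(int argc, char **argv)
{
    hid_t    fapl, fid, dset, sid, dxpl;
    hssize_t npoints;
    int     *buf;

    MPI_Init(&argc, &argv);

    /* Open the file through the MPI-IO driver */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    fid = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl);

    dset    = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
    sid     = H5Dget_space(dset);
    npoints = H5Sget_simple_extent_npoints(sid);
    buf     = (int *)malloc((size_t)npoints * sizeof(int));

    /* Request collective I/O; the deflate filter is applied per chunk
     * during the read regardless of the transfer mode. */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

    free(buf);
    H5Pclose(dxpl);
    H5Sclose(sid);
    H5Dclose(dset);
    H5Fclose(fid);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}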
@@ -2695,39 +2648,39 @@ compress_readAll(void)
void
none_selection_chunk(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_origin = NULL; /* data buffer */
+ DATATYPE * data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t mstart[RANK]; /* for data buffer in memory */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- HDprintf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = (hsize_t)chunkdim0;
@@ -2753,8 +2706,8 @@ none_selection_chunk(void)
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2763,7 +2716,7 @@ none_selection_chunk(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2786,65 +2739,64 @@ none_selection_chunk(void)
/* allocate memory for data buffer. Only allocate enough buffer for
* each processor's data. */
- if(mpi_rank) {
- data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ if (mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
- data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* put some trivial data in the data_array */
mstart[0] = mstart[1] = 0;
dataset_fill(mstart, block, data_origin);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
}
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(file_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* -------------------------
@@ -2854,19 +2806,18 @@ none_selection_chunk(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* release resource */
@@ -2877,7 +2828,6 @@ none_selection_chunk(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2888,11 +2838,12 @@ none_selection_chunk(void)
H5Fclose(fid);
/* release data buffers */
- if(data_origin) HDfree(data_origin);
- if(data_array) HDfree(data_array);
+ if (data_origin)
+ HDfree(data_origin);
+ if (data_array)
+ HDfree(data_array);
}
-
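none_selection_chunk() above checks that a rank can take part in a collective transfer while contributing nothing, by calling H5Sselect_none() on both its memory and file dataspaces. The helper below is a reduced sketch of that idea rather than a drop-in piece of the test: 'dset' is assumed to be an already-open 2-D dataset, and 'start'/'block' describe a hypothetical per-rank hyperslab.

#include "hdf5.h"
#include "mpi.h"

/* Collective write in which rank 0 contributes no elements.  The empty
 * selection still participates in the collective call, so no rank is left
 * waiting at the MPI level. */
static herr_t
write_with_empty_rank0(hid_t dset, const hsize_t start[2], const hsize_t block[2], const int *data)
{
    int     mpi_rank;
    hsize_t count[2] = {1, 1}; /* one block per dimension */
    hid_t   mem_space, file_space, dxpl;
    herr_t  ret;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    mem_space  = H5Screate_simple(2, block, NULL);
    file_space = H5Dget_space(dset);
    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, block);

    if (mpi_rank == 0) {
        /* Rank 0 selects nothing in both spaces but still calls H5Dwrite() */
        H5Sselect_none(mem_space);
        H5Sselect_none(file_space);
    }

    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    ret = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, data);

    H5Pclose(dxpl);
    H5Sclose(file_space);
    H5Sclose(mem_space);
    return ret;
}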
/* Function: test_actual_io_mode
*
* Purpose: tests one specific case of collective I/O and checks that the
@@ -2956,63 +2907,61 @@ none_selection_chunk(void)
* Date: 2011-04-06
*/
static void
-test_actual_io_mode(int selection_mode) {
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
- const char * filename;
- const char * test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
- hsize_t dims[RANK];
- hsize_t chunk_dims[RANK];
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
- char message[256];
- herr_t ret;
+test_actual_io_mode(int selection_mode)
+{
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ const char * filename;
+ const char * test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hsize_t chunk_dims[RANK];
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ char message[256];
+ herr_t ret;
/* Set up some flags to make some future if statements slightly more readable */
- direct_multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
+ direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
- multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
- selection_mode == TEST_ACTUAL_IO_RESET );
+ multi_chunk_io =
+ (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
- is_chunked = (
- selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
- selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+ is_chunked =
+ (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
@@ -3041,7 +2990,7 @@ test_actual_io_mode(int selection_mode) {
/* Create the basic Space */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* Create the dataset creation plist */
@@ -3049,17 +2998,16 @@ test_actual_io_mode(int selection_mode) {
VRFY((dcpl >= 0), "dataset creation plist created successfully");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
/* Create the dataset */
- dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
/* Create the file dataspace */
@@ -3068,7 +3016,7 @@ test_actual_io_mode(int selection_mode) {
/* Choose a selection method based on the type of I/O we want to occur,
* and also set up some selection-dependeent test info. */
- switch(selection_mode) {
+ switch (selection_mode) {
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
@@ -3079,9 +3027,9 @@ test_actual_io_mode(int selection_mode) {
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Multi Chunk - Independent";
+ test_name = "Multi Chunk - Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
/* Collective I/O with optimization */
@@ -3093,9 +3041,9 @@ test_actual_io_mode(int selection_mode) {
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- test_name = "Multi Chunk - Collective";
+ test_name = "Multi Chunk - Collective";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1)
+ if (mpi_size > 1)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
@@ -3112,24 +3060,25 @@ test_actual_io_mode(int selection_mode) {
* and at least one chunk independently, reporting mixed I/O.
*/
- if(mpi_rank == 0) {
- /* Select the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- } else {
+ if (mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
}
- test_name = "Multi Chunk - Mixed";
+ test_name = "Multi Chunk - Mixed";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
break;
/* RESET tests that the properties are properly reset to defaults each time I/O is
@@ -3152,20 +3101,21 @@ test_actual_io_mode(int selection_mode) {
* collectively, and their other chunk indpendently, reporting mixed I/O.
*/
- if(mpi_rank == 0) {
- /* Select the first chunk in the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / (hsize_t)mpi_size;
- } else {
+ if (mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / (hsize_t)mpi_size;
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = (hsize_t)(dim0 / mpi_size);
- block[1] = (hsize_t)(dim1 / mpi_size);
- count[0] = 2;
- count[1] = 1;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = (hsize_t)mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
}
/* If the testname was not already set by the RESET case */
@@ -3175,8 +3125,8 @@ test_actual_io_mode(int selection_mode) {
test_name = "Multi Chunk - Mixed (Disagreement)";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1) {
- if(mpi_rank == 0)
+ if (mpi_size > 1) {
+ if (mpi_rank == 0)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
@@ -3191,9 +3141,9 @@ test_actual_io_mode(int selection_mode) {
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Link Chunk";
+ test_name = "Link Chunk";
actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
break;
/* Contiguous Dataset */
@@ -3202,23 +3152,23 @@ test_actual_io_mode(int selection_mode) {
* collective I/O */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Contiguous";
+ test_name = "Contiguous";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
break;
case TEST_ACTUAL_IO_NO_COLLECTIVE:
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
- test_name = "Independent";
+ test_name = "Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
break;
default:
- test_name = "Undefined Selection Mode";
+ test_name = "Undefined Selection Mode";
actual_chunk_opt_mode_expected = -1;
- actual_io_mode_expected = -1;
+ actual_io_mode_expected = -1;
break;
}
@@ -3228,7 +3178,7 @@ test_actual_io_mode(int selection_mode) {
/* Create a memory dataspace mirroring the dataset and select the same hyperslab
* as in the file space.
*/
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
@@ -3240,7 +3190,7 @@ test_actual_io_mode(int selection_mode) {
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
@@ -3248,7 +3198,7 @@ test_actual_io_mode(int selection_mode) {
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
/* Set collective I/O properties in the dxpl. */
- if(is_collective) {
+ if (is_collective) {
/* Request collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3258,19 +3208,19 @@ test_actual_io_mode(int selection_mode) {
* multi chunk io instead of link chunk io.
* This is via deault.
*/
- if(multi_chunk_io) {
+ if (multi_chunk_io) {
/* force multi-chunk-io by threshold */
- ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2);
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
/* set this to manipulate testing senario about allocating processes
* to chunks */
- ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99);
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
}
/* Set directly go to multi-chunk-io without threshold calc. */
- if(direct_multi_chunk_io) {
+ if (direct_multi_chunk_io) {
/* set for multi chunk io by property*/
ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3283,43 +3233,47 @@ test_actual_io_mode(int selection_mode) {
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
/* Retreive Actual io valuess */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode suceeded" );
+ VRFY((ret >= 0), "retriving actual io mode suceeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
/* Retreive Actual io values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
/* Check write vs read */
VRFY((actual_io_mode_read == actual_io_mode_write),
- "reading and writing are the same for actual_io_mode");
+ "reading and writing are the same for actual_io_mode");
VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
- "reading and writing are the same for actual_chunk_opt_mode");
+ "reading and writing are the same for actual_chunk_opt_mode");
/* Test values */
- if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) {
- HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
+ actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n", test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
- } else {
- HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
- actual_chunk_opt_mode_write, actual_io_mode_write);
+ }
+ else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
+ actual_io_mode_write);
}
/* To test that the property is succesfully reset to the default, we perform some
@@ -3339,14 +3293,14 @@ test_actual_io_mode(int selection_mode) {
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset write (independent)");
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset write (independent)");
+ "actual_io_mode has correct value for reset write (independent)");
/* Read */
ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
@@ -3354,15 +3308,15 @@ test_actual_io_mode(int selection_mode) {
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset read (independent)");
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset read (independent)");
- }
+ "actual_io_mode has correct value for reset read (independent)");
+ }
}
/* Release some resources */
@@ -3379,7 +3333,6 @@ test_actual_io_mode(int selection_mode) {
return;
}
-
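test_actual_io_mode() above drives a collective H5Dwrite()/H5Dread() through different selections and then checks what the library actually did. The query pattern itself is small and is sketched below; 'dset', 'mem_space', 'file_space' and 'buf' are assumed to be set up elsewhere, and the expected-value checks of the real test are replaced by a diagnostic print.

#include "hdf5.h"
#include "mpi.h"
#include <stdio.h>

/* After a collective write, ask the transfer property list which chunk
 * optimization and which I/O mode the MPI-IO layer actually used. */
static void
report_actual_io(hid_t dset, hid_t mem_space, hid_t file_space, const int *buf)
{
    hid_t                            dxpl;
    H5D_mpio_actual_chunk_opt_mode_t chunk_opt_mode;
    H5D_mpio_actual_io_mode_t        io_mode;

    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    /* Both properties are filled in on the dxpl by the transfer itself */
    H5Pget_mpio_actual_chunk_opt_mode(dxpl, &chunk_opt_mode);
    H5Pget_mpio_actual_io_mode(dxpl, &io_mode);

    printf("chunk optimization = %d, actual I/O mode = %d\n", (int)chunk_opt_mode, (int)io_mode);

    H5Pclose(dxpl);
}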
/* Function: actual_io_mode_tests
*
* Purpose: Tests all possible cases of the actual_io_mode property.
@@ -3388,7 +3341,8 @@ test_actual_io_mode(int selection_mode) {
* Date: 2011-04-06
*/
void
-actual_io_mode_tests(void) {
+actual_io_mode_tests(void)
+{
int mpi_size = -1;
int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -3463,7 +3417,8 @@ actual_io_mode_tests(void) {
*
* TEST_FILTERS:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead.
+ * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter
+ * feature. Use test_no_collective_cause_mode_filter() function instead.
*
*
* Programmer: Jonathan Kim
@@ -3476,34 +3431,34 @@ actual_io_mode_tests(void) {
static void
test_no_collective_cause_mode(int selection_mode)
{
- uint32_t no_collective_cause_local_write = 0;
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_write = 0;
- uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- hbool_t is_independent=0;
- int mpi_size = -1;
- int mpi_rank = -1;
+ const char *filename;
+ const char *test_name;
+ hbool_t is_chunked = 1;
+ hbool_t is_independent = 0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
int length;
- int * buffer;
+ int * buffer;
int i;
MPI_Comm mpi_comm;
MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t dcpl = -1;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t dcpl = -1;
hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t dxpl_read = -1;
hsize_t dims[RANK];
- hid_t mem_space = -1;
+ hid_t mem_space = -1;
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
@@ -3511,7 +3466,7 @@ test_no_collective_cause_mode(int selection_mode)
H5Z_filter_t filter_info;
#endif /* LATER */
/* set to global value as default */
- int l_facc_type = facc_type;
+ int l_facc_type = facc_type;
char message[256];
/* Set up MPI parameters */
@@ -3530,27 +3485,29 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((dcpl >= 0), "dataset creation plist created successfully");
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
- ret = H5Pset_layout (dcpl, H5D_COMPACT);
- VRFY((ret >= 0),"set COMPACT layout succeeded");
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((ret >= 0), "set COMPACT layout succeeded");
is_chunked = 0;
}
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED);
- VRFY((ret >= 0),"set EXTERNAL file layout succeeded");
+ ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
+ VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
is_chunked = 0;
}
#ifdef LATER /* fletcher32 */
if (selection_mode & TEST_FILTERS) {
ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+ VRFY((ret >= 0), "Fletcher32 filter is available.\n");
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+ ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
+ VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
+ (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
+ "Fletcher32 filter encoding and decoding available.\n");
ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
+ VRFY((ret >= 0), "set filter (flecher32) succeeded");
}
#endif /* LATER */
@@ -3570,11 +3527,10 @@ test_no_collective_cause_mode(int selection_mode)
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
}
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
}
-
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3588,20 +3544,18 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
-
/* Create the dataset */
dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
/*
* Set expected causes and some tweaks based on the type of test
*/
@@ -3641,14 +3595,14 @@ test_no_collective_cause_mode(int selection_mode)
#endif /* LATER */
if (selection_mode & TEST_COLLECTIVE) {
- test_name = "Broken Collective I/O - Not Broken";
- no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
}
if (selection_mode & TEST_SET_INDEPENDENT) {
- test_name = "Broken Collective I/O - Independent";
- no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
/* switch to independent io */
is_independent = 1;
@@ -3658,7 +3612,7 @@ test_no_collective_cause_mode(int selection_mode)
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
file_space = H5S_ALL;
- mem_space = H5S_ALL;
+ mem_space = H5S_ALL;
}
else {
/* Get the file dataspace */
@@ -3666,7 +3620,7 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((file_space >= 0), "H5Dget_space succeeded");
/* Create the memory dataspace */
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
}
@@ -3676,14 +3630,14 @@ test_no_collective_cause_mode(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
- if(is_independent) {
+ if (is_independent) {
/* Set Independent I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3692,11 +3646,10 @@ test_no_collective_cause_mode(int selection_mode)
/* Set Collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform (dxpl_write, "x+1");
+ ret = H5Pset_data_transform(dxpl_write, "x+1");
VRFY((ret >= 0), "H5Pset_data_transform succeeded");
}
@@ -3706,14 +3659,14 @@ test_no_collective_cause_mode(int selection_mode)
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
-
+ ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
+ &no_collective_cause_global_write);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/*---------------------
* Test Read access
@@ -3726,25 +3679,27 @@ test_no_collective_cause_mode(int selection_mode)
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
+ ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/* Check write vs read */
VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
- "reading and writing are the same for local cause of Broken Collective I/O");
+ "reading and writing are the same for local cause of Broken Collective I/O");
VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
- "reading and writing are the same for global cause of Broken Collective I/O");
+ "reading and writing are the same for global cause of Broken Collective I/O");
/* Test values */
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3775,7 +3730,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-
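test_no_collective_cause_mode() relies on H5Pget_mpio_no_collective_cause(), which reports, as a bit mask, why a requested collective transfer was (or was not) broken, both locally on the calling rank and globally across all ranks. A reduced sketch of the query follows, with only the cause bits exercised above decoded; 'dxpl' is assumed to come from a transfer that has already completed.

#include "hdf5.h"
#include <stdint.h>
#include <stdio.h>

/* Query and decode the reasons the library fell back from collective I/O
 * for the most recent transfer made with 'dxpl'. */
static void
report_no_collective_cause(hid_t dxpl)
{
    uint32_t local_cause  = 0;
    uint32_t global_cause = 0;

    H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);

    if (global_cause == H5D_MPIO_COLLECTIVE)
        printf("collective I/O was performed on all ranks\n");
    if (global_cause & H5D_MPIO_SET_INDEPENDENT)
        printf("independent I/O was explicitly requested\n");
    if (global_cause & H5D_MPIO_DATATYPE_CONVERSION)
        printf("broken by a datatype conversion\n");
    if (global_cause & H5D_MPIO_DATA_TRANSFORMS)
        printf("broken by a data transform\n");
    if (local_cause != global_cause)
        printf("this rank's local cause (0x%x) differs from the global one (0x%x)\n",
               (unsigned)local_cause, (unsigned)global_cause);
}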
/*
* Function: test_no_collective_cause_mode_filter
*
@@ -3800,31 +3754,31 @@ test_no_collective_cause_mode(int selection_mode)
static void
test_no_collective_cause_mode_filter(int selection_mode)
{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- const char * filename;
- const char * test_name = "I/O";
- hbool_t is_chunked=1;
- int mpi_size = -1;
- int mpi_rank = -1;
+ const char *filename;
+ const char *test_name = "I/O";
+ hbool_t is_chunked = 1;
+ int mpi_size = -1;
+ int mpi_rank = -1;
int length;
- int * buffer;
+ int * buffer;
int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
+ hid_t fapl_read = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl = -1;
hsize_t dims[RANK];
- hid_t mem_space = -1;
+ hid_t mem_space = -1;
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
@@ -3848,29 +3802,30 @@ test_no_collective_cause_mode_filter(int selection_mode)
dcpl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl >= 0), "dataset creation plist created successfully");
- if (selection_mode == TEST_FILTERS_READ ) {
+ if (selection_mode == TEST_FILTERS_READ) {
#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+ ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
+ VRFY((ret >= 0), "Fletcher32 filter is available.\n");
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+ ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, (unsigned int *)&filter_info);
+ VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
+ (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
+ "Fletcher32 filter encoding and decoding available.\n");
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
+ ret = H5Pset_fletcher32(dcpl);
+ VRFY((ret >= 0), "set filter (flecher32) succeeded");
#endif /* LATER */
}
- else {
+ else {
VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
}
/* Create the basic Space */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3882,23 +3837,22 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
-
/* Create the dataset */
dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
#ifdef LATER /* fletcher32 */
/* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
+ test_name = "Broken Collective I/O - Filter is required";
+ no_collective_cause_local_expected = H5D_MPIO_FILTERS;
no_collective_cause_global_expected = H5D_MPIO_FILTERS;
#endif /* LATER */
@@ -3907,7 +3861,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY((file_space >= 0), "H5Dget_space succeeded");
/* Create the memory dataspace */
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
/* Get the number of elements in the selection */
@@ -3916,33 +3870,32 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
- if (selection_mode == TEST_FILTERS_READ) {
+ if (selection_mode == TEST_FILTERS_READ) {
/* To test read in collective I/O mode , write in independent mode
* because write fails with mpio + filter */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
- else {
+ else {
/* To test write in collective I/O mode. */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
-
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
/* Make a copy of the dxpl to test the read operation */
dxpl = H5Pcopy(dxpl);
VRFY((dxpl >= 0), "H5Pcopy succeeded");
@@ -3954,7 +3907,6 @@ test_no_collective_cause_mode_filter(int selection_mode)
if (fid)
H5Fclose(fid);
-
/*---------------------
* Test Read access
*---------------------*/
@@ -3963,8 +3915,8 @@ test_no_collective_cause_mode_filter(int selection_mode)
fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
- fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_read);
+ dataset = H5Dopen2(fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
/* Set collective I/O properties in the dxpl. */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
@@ -3973,19 +3925,21 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
+ ret = H5Pget_mpio_no_collective_cause(dxpl, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/* Test values */
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
/* Release some resources */
@@ -4023,27 +3977,28 @@ no_collective_cause_tests(void)
/*
* Test individual cause
*/
- test_no_collective_cause_mode (TEST_COLLECTIVE);
- test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+ test_no_collective_cause_mode(TEST_COLLECTIVE);
+ test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
/* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
+ test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
#endif /* LATER */
/*
* Test combined causes
*/
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
+ TEST_DATA_TRANSFORMS);
return;
}
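The combined-cause calls at the end of no_collective_cause_tests() work because the cause value is a bit mask: when several restrictions apply at once, the reported value is the bitwise OR of the individual flags. A small sketch of checking such a combined expectation follows, assuming 'dxpl' comes from a transfer that hit both a datatype conversion and a data transform.

#include "hdf5.h"
#include <stdint.h>
#include <assert.h>

/* Combined expectation: both cause bits must be reported together. */
static void
check_combined_cause(hid_t dxpl)
{
    uint32_t local_cause = 0, global_cause = 0;
    uint32_t expected    = (uint32_t)(H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_DATA_TRANSFORMS);

    H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
    assert(local_cause == expected);
    assert(global_cause == expected);
}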
@@ -4062,41 +4017,42 @@ no_collective_cause_tests(void)
void
dataset_atomicity(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
- int buf_size;
- hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int * write_buf = NULL; /* data buffer */
+ int * read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
const char *filename;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
- int i, j, k;
- hbool_t atomicity = FALSE;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- dim0 = 64; dim1 = 32;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64;
+ dim1 = 32;
filename = GetTestParameters();
if (facc_type != FACC_MPIO) {
HDprintf("Atomicity tests will not work without the MPIO VFD\n");
return;
}
- if(VERBOSE_MED)
+ if (VERBOSE_MED)
HDprintf("atomic writes to file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
@@ -4121,26 +4077,22 @@ dataset_atomicity(void)
/* setup dimensionality object */
dims[0] = (hsize_t)dim0;
dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create datasets */
- dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
- dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
/* initialize datasets to 0s */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
@@ -4153,39 +4105,39 @@ dataset_atomicity(void)
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* make sure setting atomicity fails on a serial file ID */
/* file locking allows only one file open (serial) for writing */
- if(MAINPROCESS){
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+ if (MAINPROCESS) {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
VRFY((fid >= 0), "H5Fopen succeeed");
}
/* should fail */
- ret = H5Fset_mpi_atomicity(fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- if(MAINPROCESS){
+ if (MAINPROCESS) {
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
}
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* setup file access template */
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity(fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
@@ -4193,22 +4145,22 @@ dataset_atomicity(void)
VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
if (0 == mpi_rank) {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
}
else {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
}
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes contiguously to the entire dataset */
if (0 == mpi_rank) {
@@ -4221,12 +4173,14 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
}
- if(VERBOSE_MED) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- HDprintf ("\n");
- for (j=0 ; j<dim1 ; j++)
- HDprintf ("%d ", read_buf[k++]);
+ if (VERBOSE_MED) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
}
@@ -4238,10 +4192,11 @@ dataset_atomicity(void)
VRFY((compare == 0 || compare == 5),
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
- for (i=1; i<buf_size; i++) {
+ for (i = 1; i < buf_size; i++) {
if (read_buf[i] != compare) {
- HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
- nerrors ++;
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+ read_buf[i], compare);
+ nerrors++;
}
}
}
@@ -4250,8 +4205,10 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5D close succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
/* open dataset2 (non-contiguous case) */
dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
@@ -4264,69 +4221,68 @@ dataset_atomicity(void)
read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
atomicity = FALSE;
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
-
- block[0] = (hsize_t)(dim0/mpi_size - 1);
- block[1] = (hsize_t)(dim1/mpi_size - 1);
+ block[0] = (hsize_t)(dim0 / mpi_size - 1);
+ block[1] = (hsize_t)(dim1 / mpi_size - 1);
stride[0] = block[0] + 1;
stride[1] = block[1] + 1;
- count[0] = (hsize_t)mpi_size;
- count[1] = (hsize_t)mpi_size;
- start[0] = 0;
- start[1] = 0;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
+ start[0] = 0;
+ start[1] = 0;
/* create a file dataspace */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_dataspace >= 0), "");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes to the dataset */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
/* All processes wait for the write to finish. This works because
atomicity is set to true */
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* The other processes read the entire dataset */
if (0 != mpi_rank) {
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, read_buf);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
}
- if(VERBOSE_MED) {
+ if (VERBOSE_MED) {
if (mpi_rank == 1) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- HDprintf ("\n");
- for (j=0 ; j<dim1 ; j++)
- HDprintf ("%d ", read_buf[k++]);
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
- HDprintf ("\n");
+ HDprintf("\n");
}
}
@@ -4334,34 +4290,37 @@ dataset_atomicity(void)
as 5 (read happened after process 0 wrote to dataset 1) */
if (0 != mpi_rank) {
int compare;
- i=0;j=0;k=0;
+ i = 0;
+ j = 0;
+ k = 0;
compare = 5;
H5_CHECK_OVERFLOW(block[0], hsize_t, int);
H5_CHECK_OVERFLOW(block[1], hsize_t, int);
- for (i=0 ; i<dim0 ; i++) {
- if (i >= mpi_rank*((int)block[0]+1)) {
+ for (i = 0; i < dim0; i++) {
+ if (i >= mpi_rank * ((int)block[0] + 1)) {
break;
}
- if ((i+1)%((int)block[0]+1)==0) {
+ if ((i + 1) % ((int)block[0] + 1) == 0) {
k += dim1;
continue;
}
- for (j=0 ; j<dim1 ; j++) {
- if (j >= mpi_rank*((int)block[1]+1)) {
- k += dim1 - mpi_rank*((int)block[1]+1);
+ for (j = 0; j < dim1; j++) {
+ if (j >= mpi_rank * ((int)block[1] + 1)) {
+ k += dim1 - mpi_rank * ((int)block[1] + 1);
break;
}
- if ((j+1)%((int)block[1]+1)==0) {
+ if ((j + 1) % ((int)block[1] + 1) == 0) {
k++;
continue;
}
else if (compare != read_buf[k]) {
- HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
+ k, read_buf[k], compare);
nerrors++;
}
- k ++;
+ k++;
}
}
}
@@ -4374,12 +4333,13 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Sclose succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
-
}
/* Function: test_dense_attr
@@ -4392,21 +4352,21 @@ dataset_atomicity(void)
void
test_dense_attr(void)
{
- int mpi_size, mpi_rank;
- hid_t fpid, fid;
- hid_t gid, gpid;
- hid_t atFileSpace, atid;
- hsize_t atDims[1] = {10000};
- herr_t status;
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
const char *filename;
/* get filename */
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
+ HDassert(filename != NULL);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
fpid = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fpid > 0), "H5Pcreate succeeded");
@@ -4445,4 +4405,3 @@ test_dense_attr(void)
return;
}
-
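Note: the dataset_atomicity() hunks above exercise H5Fset_mpi_atomicity()/H5Fget_mpi_atomicity(), which request strict MPI-IO consistency so that one rank's complete write is seen all-or-nothing by the other ranks. A minimal sketch of that setup, with an illustrative file name and error handling reduced to early returns (not part of this patch):

#include "hdf5.h"
#include "mpi.h"

static int
atomic_file_example(MPI_Comm comm)
{
    hid_t   fapl, fid;
    hbool_t flag = FALSE;

    /* All ranks create the file collectively through an MPI-IO fapl */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    if (H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL) < 0)
        return -1;
    fid = H5Fcreate("atomicity_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    if (fid < 0)
        return -1;

    /* Ask MPI-IO for atomic (strict) consistency semantics */
    if (H5Fset_mpi_atomicity(fid, TRUE) < 0)
        return -1;

    /* Read the flag back, as the test above does before relying on it */
    if (H5Fget_mpi_atomicity(fid, &flag) < 0 || flag != TRUE)
        return -1;

    H5Pclose(fapl);
    return (int)H5Fclose(fid);
}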
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 19a75c8..4e652bf 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -17,30 +17,30 @@
#include "testphdf5.h"
-#include "H5CXprivate.h" /* API Contexts */
+#include "H5CXprivate.h" /* API Contexts */
#include "H5Iprivate.h"
#include "H5PBprivate.h"
/*
* This file needs to access private information from the H5F package.
*/
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
#include "H5ACpkg.h"
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
#include "H5Cpkg.h"
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#define H5F_TESTING
#include "H5Fpkg.h"
-#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
#include "H5MFpkg.h"
-#define NUM_DSETS 5
+#define NUM_DSETS 5
int mpi_size, mpi_rank;
static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
-static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
- hsize_t page_size, size_t page_buffer_size);
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size);
/*
* test file access by communicator besides COMM_WORLD.
@@ -56,79 +56,79 @@ static int open_file(const char *filename, hid_t fapl, int metadata_write_strate
void
test_split_comm_access(void)
{
- MPI_Comm comm;
- MPI_Info info = MPI_INFO_NULL;
- int is_old, mrc;
- int newrank, newprocs;
- hid_t fid; /* file IDs */
- hid_t acc_tpl; /* File access properties */
- herr_t ret; /* generic return value */
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int is_old, mrc;
+ int newrank, newprocs;
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
const char *filename;
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- HDprintf("Split Communicator access test on file %s\n",
- filename);
+ HDprintf("Split Communicator access test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- is_old = mpi_rank%2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
- VRFY((mrc==MPI_SUCCESS), "");
- MPI_Comm_size(comm,&newprocs);
- MPI_Comm_rank(comm,&newrank);
-
- if (is_old){
- /* odd-rank processes */
- mrc = MPI_Barrier(comm);
- VRFY((mrc==MPI_SUCCESS), "");
- }else{
- /* even-rank processes */
- int sub_mpi_rank; /* rank in the sub-comm */
- MPI_Comm_rank(comm,&sub_mpi_rank);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret=H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ is_old = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ MPI_Comm_size(comm, &newprocs);
+ MPI_Comm_rank(comm, &newrank);
+
+ if (is_old) {
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ }
+ else {
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+ MPI_Comm_rank(comm, &sub_mpi_rank);
- /* close the file */
- ret=H5Fclose(fid);
- VRFY((ret >= 0), "");
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
- /* delete the test file */
- if (sub_mpi_rank == 0){
- mrc = MPI_File_delete((char *)filename, info);
- /*VRFY((mrc==MPI_SUCCESS), ""); */
- }
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "");
+
+ /* delete the test file */
+ if (sub_mpi_rank == 0) {
+ mrc = MPI_File_delete((char *)filename, info);
+ /*VRFY((mrc==MPI_SUCCESS), ""); */
+ }
}
mrc = MPI_Comm_free(&comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free succeeded");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "final MPI_Barrier succeeded");
+ VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
}
void
test_page_buffer_access(void)
{
- hid_t file_id = -1; /* File ID */
- hid_t fcpl, fapl;
- size_t page_count = 0;
- int i, num_elements = 200;
- haddr_t raw_addr, meta_addr;
- int *data;
- H5F_t *f = NULL;
- herr_t ret; /* generic return value */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl, fapl;
+ size_t page_count = 0;
+ int i, num_elements = 200;
+ haddr_t raw_addr, meta_addr;
+ int * data;
+ H5F_t * f = NULL;
+ herr_t ret; /* generic return value */
const char *filename;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -136,7 +136,7 @@ test_page_buffer_access(void)
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- HDprintf("Page Buffer Usage in Parallel %s\n", filename);
+ HDprintf("Page Buffer Usage in Parallel %s\n", filename);
fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
VRFY((fapl >= 0), "create_faccess_plist succeeded");
@@ -145,15 +145,14 @@ test_page_buffer_access(void)
ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
VRFY((ret == 0), "");
- ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*128);
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
VRFY((ret == 0), "");
- ret = H5Pset_page_buffer_size(fapl, sizeof(int)*100000, 0, 0);
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
VRFY((ret == 0), "");
/* This should fail because collective metadata writes are not supported with page buffering */
- H5E_BEGIN_TRY {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
- } H5E_END_TRY;
+ H5E_BEGIN_TRY { file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); }
+ H5E_END_TRY;
VRFY((file_id < 0), "H5Fcreate failed");
/* disable collective metadata writes for page buffering to work */
@@ -162,27 +161,29 @@ test_page_buffer_access(void)
ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
VRFY((ret == 0), "");
- ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int)*100, sizeof(int)*100000);
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100,
+ sizeof(int) * 100000);
VRFY((ret == 0), "");
ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
VRFY((ret == 0), "");
- ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int)*100, sizeof(int)*100000);
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100,
+ sizeof(int) * 100000);
VRFY((ret == 0), "");
- ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100);
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100);
VRFY((ret == 0), "");
- data = (int *) HDmalloc(sizeof(int)*(size_t)num_elements);
+ data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements);
/* initialize all the elements to have a value of -1 */
- for(i=0 ; i<num_elements ; i++)
+ for (i = 0; i < num_elements; i++)
data[i] = -1;
- if(MAINPROCESS) {
+ if (MAINPROCESS) {
hid_t fapl_self = H5I_INVALID_HID;
- fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
+ fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
- ret = H5Pset_page_buffer_size(fapl_self, sizeof(int)*1000, 0, 0);
+ ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
@@ -202,49 +203,49 @@ test_page_buffer_access(void)
VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
/* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements);
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
VRFY((raw_addr != HADDR_UNDEF), "");
/* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int)*(size_t)num_elements);
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
VRFY((meta_addr != HADDR_UNDEF), "");
page_count = 0;
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the first 50 elements */
- for(i=0 ; i<50 ; i++)
+ for (i = 0; i < 50; i++)
data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
page_count += 2;
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the second 50 elements */
- for(i=0 ; i<50 ; i++)
- data[i] = i+50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update 100 - 200 */
- for(i=0 ; i<100 ; i++)
- data[i] = i+100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -252,43 +253,47 @@ test_page_buffer_access(void)
VRFY((ret == 0), "");
/* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
/* close the file */
ret = H5Fclose(file_id);
VRFY((ret >= 0), "H5Fclose succeeded");
ret = H5Pclose(fapl_self);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
/* Pop API context */
- if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+ if (api_ctx_pushed) {
+ ret = H5CX_pop();
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
}
MPI_Barrier(MPI_COMM_WORLD);
- if(mpi_size > 1) {
- ret = H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 0, 0);
+ if (mpi_size > 1) {
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
ret = H5Pset_coll_metadata_write(fapl, FALSE);
@@ -308,45 +313,45 @@ test_page_buffer_access(void)
VRFY((f->shared->page_buf != NULL), "Page Buffer created with more than 1 process");
/* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements);
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
VRFY((raw_addr != HADDR_UNDEF), "");
/* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int)*(size_t)num_elements);
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
VRFY((meta_addr != HADDR_UNDEF), "");
page_count = 0;
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the first 50 elements */
- for(i=0 ; i<50 ; i++)
+ for (i = 0; i < 50; i++)
data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the second 50 elements */
- for(i=0 ; i<50 ; i++)
- data[i] = i+50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update 100 - 200 */
- for(i=0 ; i<100 ; i++)
- data[i] = i+100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -354,51 +359,51 @@ test_page_buffer_access(void)
VRFY((ret == 0), "");
/* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 200; i++)
+ for (i = 0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
page_count += 1;
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
MPI_Barrier(MPI_COMM_WORLD);
/* reset the first 50 elements to -1*/
- for(i=0 ; i<50 ; i++)
+ for (i = 0; i < 50; i++)
data[i] = -1;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == -1), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i = 0; i < 50; i++)
VRFY((data[i] == -1), "Read different values than written");
/* close the file */
@@ -407,12 +412,16 @@ test_page_buffer_access(void)
}
ret = H5Pclose(fapl);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
ret = H5Pclose(fcpl);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
/* Pop API context */
- if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+ if (api_ctx_pushed) {
+ ret = H5CX_pop();
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
HDfree(data);
data = NULL;
@@ -422,22 +431,22 @@ test_page_buffer_access(void)
static int
create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
{
- hid_t file_id, dset_id, grp_id;
- hid_t sid, mem_dataspace;
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- DATATYPE *data_array = NULL;
- hsize_t dims[RANK], i;
- hsize_t num_elements;
- int k;
- char dset_name[20];
- H5F_t *f = NULL;
- H5C_t *cache_ptr = NULL;
+ hid_t file_id, dset_id, grp_id;
+ hid_t sid, mem_dataspace;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE * data_array = NULL;
+ hsize_t dims[RANK], i;
+ hsize_t num_elements;
+ int k;
+ char dset_name[20];
+ H5F_t * f = NULL;
+ H5C_t * cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
- herr_t ret;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
VRFY((file_id >= 0), "");
@@ -471,61 +480,57 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = (hsize_t)(ROW_FACTOR*mpi_size);
- dims[1] = (hsize_t)(COL_FACTOR*mpi_size);
- sid = H5Screate_simple (RANK, dims, NULL);
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* Each process takes a slabs of rows. */
- block[0] = dims[0]/(hsize_t)mpi_size;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
num_elements = block[0] * block[1];
/* allocate memory for data buffer */
- data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* put some trivial data in the data_array */
- for(i=0 ; i<num_elements; i++)
+ for (i = 0; i < num_elements; i++)
data_array[i] = mpi_rank + 1;
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
VRFY((mem_dataspace >= 0), "");
- for(k=0 ; k<NUM_DSETS; k++) {
+ for (k = 0; k < NUM_DSETS; k++) {
HDsprintf(dset_name, "D1dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
HDsprintf(dset_name, "D2dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
HDsprintf(dset_name, "D3dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
HDsprintf(dset_name, "dset%d", k);
- dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
@@ -534,7 +539,7 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDmemset(data_array, 0, num_elements*sizeof(DATATYPE));
+ HDmemset(data_array, 0, num_elements * sizeof(DATATYPE));
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -544,8 +549,8 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- for (i=0; i < num_elements; i++)
- VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+ for (i = 0; i < num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
HDsprintf(dset_name, "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
@@ -568,7 +573,11 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((ret == 0), "");
/* Pop API context */
- if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+ if (api_ctx_pushed) {
+ ret = H5CX_pop();
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
MPI_Barrier(MPI_COMM_WORLD);
HDfree(data_array);
@@ -576,28 +585,28 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
} /* create_file */
static int
-open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
- hsize_t page_size, size_t page_buffer_size)
+open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size)
{
- hid_t file_id, dset_id, grp_id, grp_id2;
- hid_t sid, mem_dataspace;
- DATATYPE *data_array = NULL;
- hsize_t dims[RANK];
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- int i, k, ndims;
- hsize_t num_elements;
- char dset_name[20];
- H5F_t *f = NULL;
- H5C_t *cache_ptr = NULL;
+ hid_t file_id, dset_id, grp_id, grp_id2;
+ hid_t sid, mem_dataspace;
+ DATATYPE * data_array = NULL;
+ hsize_t dims[RANK];
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ int i, k, ndims;
+ hsize_t num_elements;
+ char dset_name[20];
+ H5F_t * f = NULL;
+ H5C_t * cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
- herr_t ret;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- ret = H5Pget_mdc_config(fapl, &config);
+ ret = H5Pget_mdc_config(fapl, &config);
VRFY((ret == 0), "");
config.metadata_write_strategy = metadata_write_strategy;
@@ -632,29 +641,29 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = (hsize_t)(ROW_FACTOR*mpi_size);
- dims[1] = (hsize_t)(COL_FACTOR*mpi_size);
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
/* Each process takes a slabs of rows. */
- block[0] = dims[0]/(hsize_t)mpi_size;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
num_elements = block[0] * block[1];
/* allocate memory for data buffer */
- data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
VRFY((mem_dataspace >= 0), "");
- for(k=0 ; k<NUM_DSETS; k++) {
+ for (k = 0; k < NUM_DSETS; k++) {
HDsprintf(dset_name, "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -664,8 +673,8 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == (hsize_t)(ROW_FACTOR*mpi_size), "Wrong dataset dimensions");
- VRFY(dims[1] == (hsize_t)(COL_FACTOR*mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -678,8 +687,8 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
ret = H5Sclose(sid);
VRFY((ret == 0), "");
- for (i=0; i < (int)num_elements; i++)
- VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+ for (i = 0; i < (int)num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
}
grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -694,16 +703,16 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
/* flush invalidate each ring, starting from the outermost ring and
* working inward.
*/
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) {
- H5C_cache_entry_t * entry_ptr = NULL;
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ H5C_cache_entry_t *entry_ptr = NULL;
entry_ptr = cache_ptr->index[i];
- while ( entry_ptr != NULL ) {
+ while (entry_ptr != NULL) {
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->is_dirty == FALSE);
- if(!entry_ptr->is_pinned && !entry_ptr->is_protected) {
+ if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
VRFY((ret == 0), "");
}
@@ -728,7 +737,11 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
VRFY((ret == 0), "");
/* Pop API context */
- if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+ if (api_ctx_pushed) {
+ ret = H5CX_pop();
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
HDfree(data_array);
@@ -743,19 +756,19 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
void
test_file_properties(void)
{
- hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
- hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
- hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
- hbool_t is_coll;
- htri_t are_equal;
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
+ hbool_t is_coll;
+ htri_t are_equal;
const char *filename;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- MPI_Comm comm_out = MPI_COMM_NULL;
- MPI_Info info_out = MPI_INFO_NULL;
- herr_t ret; /* Generic return value */
- int mpi_ret; /* MPI return value */
- int cmp; /* Compare value */
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ MPI_Comm comm_out = MPI_COMM_NULL;
+ MPI_Info info_out = MPI_INFO_NULL;
+ herr_t ret; /* Generic return value */
+ int mpi_ret; /* MPI return value */
+ int cmp; /* Compare value */
filename = (const char *)GetTestParameters();
@@ -785,7 +798,7 @@ test_file_properties(void)
/* Check the communicator */
VRFY((comm != comm_out), "Communicators should not be bitwise identical");
- cmp = MPI_UNEQUAL;
+ cmp = MPI_UNEQUAL;
mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
VRFY((ret >= 0), "MPI_Comm_compare succeeded");
VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
@@ -932,4 +945,3 @@ test_file_properties(void)
VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
} /* end test_file_properties() */
-
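Note: test_page_buffer_access() above hinges on three property-list settings working together: a paged file-space strategy and page size on the FCPL, a page-buffer size on the FAPL, and collective metadata writes disabled (the test deliberately shows H5Fcreate() failing when they are left on). A condensed sketch of that setup, with illustrative sizes and file name (not part of this patch):

#include "hdf5.h"
#include "mpi.h"

static hid_t
open_with_page_buffer(const char *name) /* "name" is illustrative */
{
    hid_t fcpl, fapl, fid;

    fcpl = H5Pcreate(H5P_FILE_CREATE);
    fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* Paged file-space aggregation is required for the page buffer */
    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
    H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);

    /* Size the page buffer; 0/0 leaves the metadata/raw split at the default */
    H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);

    /* Page buffering does not support collective metadata writes */
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_coll_metadata_write(fapl, FALSE);

    fid = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl);
    H5Pclose(fcpl);
    H5Pclose(fapl);
    return fid;
}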
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 81bb7c2..702f73a 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -61,81 +61,75 @@
void
file_image_daisy_chain_test(void)
{
- char file_name[1024] = "\0";
- int mpi_size, mpi_rank;
- int mpi_result;
- int i;
- int space_ndims;
+ char file_name[1024] = "\0";
+ int mpi_size, mpi_rank;
+ int mpi_result;
+ int i;
+ int space_ndims;
MPI_Status rcvstat;
- int * vector_ptr = NULL;
- hid_t fapl_id = -1;
- hid_t file_id; /* file IDs */
- hid_t dset_id = -1;
- hid_t dset_type_id = -1;
- hid_t space_id = -1;
- herr_t err;
- hsize_t dims[1];
- void * image_ptr = NULL;
- ssize_t bytes_read;
- ssize_t image_len;
- hbool_t vector_ok = TRUE;
- htri_t tri_result;
-
+ int * vector_ptr = NULL;
+ hid_t fapl_id = -1;
+ hid_t file_id; /* file IDs */
+ hid_t dset_id = -1;
+ hid_t dset_type_id = -1;
+ hid_t space_id = -1;
+ herr_t err;
+ hsize_t dims[1];
+ void * image_ptr = NULL;
+ ssize_t bytes_read;
+ ssize_t image_len;
+ hbool_t vector_ok = TRUE;
+ htri_t tri_result;
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
- (int)mpi_rank);
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
- if(mpi_rank == 0) {
+ if (mpi_rank == 0) {
- /* 1) Creates a core file with an integer vector data set
+ /* 1) Creates a core file with an integer vector data set
* of length mpi_size,
*/
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
VRFY((err >= 0), "setting core file driver in fapl.");
file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
VRFY((file_id >= 0), "created core file");
- dims[0] = (hsize_t)mpi_size;
- space_id = H5Screate_simple(1, dims, dims);
+ dims[0] = (hsize_t)mpi_size;
+ space_id = H5Screate_simple(1, dims, dims);
VRFY((space_id >= 0), "created data space");
- dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "created data set");
-
- /* 2) Initialize the vector to zero in location 0, and
+ /* 2) Initialize the vector to zero in location 0, and
* to -1 everywhere else.
*/
- vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory representation of vector");
vector_ptr[0] = 0;
- for(i = 1; i < mpi_size; i++)
+ for (i = 1; i < mpi_size; i++)
vector_ptr[i] = -1;
- err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, (void *)vector_ptr);
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "wrote initial data to vector.");
HDfree(vector_ptr);
vector_ptr = NULL;
-
/* 3) Flush the core file, and get an image of it. Close
* the core file.
*/
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "flushed core file.");
image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
@@ -148,206 +142,192 @@ file_image_daisy_chain_test(void)
VRFY(bytes_read == image_len, "wrote file into image buffer");
err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
+ VRFY((err >= 0), "closed data space.");
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
-
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
/* 4) Send the image to process 1. */
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, 1, 0, MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
- MPI_BYTE, 1, 0, MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
+ /* 5) Await receipt on a file image from process n-1. */
- /* 5) Await receipt on a file image from process n-1. */
-
- mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD,
- &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
image_ptr = (void *)HDmalloc((size_t)image_len);
VRFY(image_ptr != NULL, "allocated file image receive buffer.");
- mpi_result = MPI_Recv((void *)image_ptr, (int)image_len,
- MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD,
- &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), \
- "received file image from process n-1");
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1");
- /* 6) open the image received from process n-1, verify that
+ /* 6) open the image received from process n-1, verify that
* it contains a vector of length equal to mpi_size, and
- * that the vector contains (0, 1, 2, ... n-1).
+ * that the vector contains (0, 1, 2, ... n-1).
*/
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
VRFY((err >= 0), "setting core file driver in fapl.");
- err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
VRFY((err >= 0), "set file image in fapl.");
file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "opened received file image file");
- dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
VRFY((dset_id >= 0), "opened data set");
- dset_type_id = H5Dget_type(dset_id);
+ dset_type_id = H5Dget_type(dset_id);
VRFY((dset_type_id >= 0), "obtained data set type");
- tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
VRFY((tri_result == TRUE), "verified data set type");
- space_id = H5Dget_space(dset_id);
+ space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
- space_ndims = H5Sget_simple_extent_ndims(space_id);
- VRFY((space_ndims == 1), "verified data space num dims(1)");
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
- space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
- VRFY((space_ndims == 1), "verified data space num dims(2)");
- VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
- vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, (void *)vector_ptr);
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
- for(i = 0; i < mpi_size; i++)
- if(vector_ptr[i] != i)
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++)
+ if (vector_ptr[i] != i)
vector_ok = FALSE;
VRFY((vector_ok), "verified received vector.");
HDfree(vector_ptr);
vector_ptr = NULL;
- /* 7) closes the core file and exit. */
+ /* 7) closes the core file and exit. */
err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
+ VRFY((err >= 0), "closed data space.");
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
- } else {
+ }
+ else {
/* 1) Await receipt of file image from process (i - 1). */
- mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD,
- &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), \
- "received image size from process mpi_rank-1");
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1");
image_ptr = (void *)HDmalloc((size_t)image_len);
VRFY(image_ptr != NULL, "allocated file image receive buffer.");
- mpi_result = MPI_Recv((void *)image_ptr, (int)image_len,
- MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD,
- &rcvstat);
- VRFY((mpi_result == MPI_SUCCESS), \
- "received file image from process mpi_rank-1");
-
- /* 2) Open the image with the core file driver, verify that it
- * contains a vector v of length, and that v[j] = j for
- * 0 <= j < i, and that v[j] == -1 for i <= j < n
- */
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1");
+
+ /* 2) Open the image with the core file driver, verify that it
+      * contains a vector v of length n, and that v[j] = j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
VRFY((err >= 0), "setting core file driver in fapl.");
- err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
VRFY((err >= 0), "set file image in fapl.");
file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
- H5Eprint2(H5P_DEFAULT, stderr);
+ H5Eprint2(H5P_DEFAULT, stderr);
VRFY((file_id >= 0), "opened received file image file");
- dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
VRFY((dset_id >= 0), "opened data set");
- dset_type_id = H5Dget_type(dset_id);
+ dset_type_id = H5Dget_type(dset_id);
VRFY((dset_type_id >= 0), "obtained data set type");
- tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
VRFY((tri_result == TRUE), "verified data set type");
- space_id = H5Dget_space(dset_id);
+ space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
- space_ndims = H5Sget_simple_extent_ndims(space_id);
- VRFY((space_ndims == 1), "verified data space num dims(1)");
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
- space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
- VRFY((space_ndims == 1), "verified data space num dims(2)");
- VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
- vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, (void *)vector_ptr);
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
- for(i = 0; i < mpi_size; i++){
- if(i < mpi_rank) {
- if(vector_ptr[i] != i)
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++) {
+ if (i < mpi_rank) {
+ if (vector_ptr[i] != i)
vector_ok = FALSE;
- } else {
- if(vector_ptr[i] != -1)
+ }
+ else {
+ if (vector_ptr[i] != -1)
vector_ok = FALSE;
- }
+ }
}
VRFY((vector_ok), "verified received vector.");
+ /* 3) Set v[i] = i in the core file. */
- /* 3) Set v[i] = i in the core file. */
-
- vector_ptr[mpi_rank] = mpi_rank;
+ vector_ptr[mpi_rank] = mpi_rank;
- err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, (void *)vector_ptr);
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "wrote modified data to vector.");
HDfree(vector_ptr);
vector_ptr = NULL;
+ /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
- /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
-
- err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "flushed core file.");
image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
@@ -359,38 +339,33 @@ file_image_daisy_chain_test(void)
bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
VRFY(bytes_read == image_len, "wrote file into image buffer");
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
- MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), \
- "sent image size to process (mpi_rank + 1) % mpi_size");
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE,
+ (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS), \
- "sent image to process (mpi_rank + 1) % mpi_size");
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size");
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
- /* 5) close the core file and exit. */
+ /* 5) close the core file and exit. */
err = H5Sclose(space_id);
- VRFY((err >= 0), "closed data space.");
+ VRFY((err >= 0), "closed data space.");
- err = H5Dclose(dset_id);
- VRFY((err >= 0), "closed data set.");
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
- err = H5Fclose(file_id);
- VRFY((err >= 0), "closed core file(1).");
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0), "closed fapl(1).");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
}
return;
} /* file_image_daisy_chain_test() */
-
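Note: file_image_daisy_chain_test() above passes an in-memory HDF5 file from rank to rank; the core of that trick is H5Fget_file_image() on the sending side and H5Pset_fapl_core() plus H5Pset_file_image() on the receiving side. A stripped-down sketch of the two halves, with error checks omitted and illustrative buffer handling (not part of this patch):

#include <stdlib.h>
#include "hdf5.h"

/* Sender: flush the file and copy its bytes into a fresh buffer */
static void *
grab_file_image(hid_t fid, ssize_t *len_out)
{
    ssize_t len;
    void *  buf;

    H5Fflush(fid, H5F_SCOPE_GLOBAL);
    len = H5Fget_file_image(fid, NULL, (size_t)0); /* size query */
    buf = malloc((size_t)len);
    H5Fget_file_image(fid, buf, (size_t)len);      /* fill the buffer */
    *len_out = len;
    return buf;
}

/* Receiver: open the image through the core (memory) driver */
static hid_t
open_file_image(const char *name, void *buf, size_t len)
{
    hid_t fapl, fid;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(fapl, (size_t)(64 * 1024), FALSE); /* no backing store on disk */
    H5Pset_file_image(fapl, buf, len);                  /* seed the driver with the image */
    fid = H5Fopen(name, H5F_ACC_RDWR, fapl);
    H5Pclose(fapl);
    return fid;
}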
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 7b0e677..10d6c27 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -19,24 +19,22 @@
* Date: 2007/05/15
*/
-
#include "testphdf5.h"
#ifdef H5_HAVE_SZLIB_H
-# include "szlib.h"
+#include "szlib.h"
#endif
static int mpi_size, mpi_rank;
/* Chunk sizes */
-#define CHUNK_DIM1 7
-#define CHUNK_DIM2 27
+#define CHUNK_DIM1 7
+#define CHUNK_DIM2 27
/* Sizes of the vertical hyperslabs. Total dataset size is
{HS_DIM1, HS_DIM2 * mpi_size } */
-#define HS_DIM1 200
-#define HS_DIM2 100
-
+#define HS_DIM1 200
+#define HS_DIM2 100
/*-------------------------------------------------------------------------
* Function: filter_read_internal
@@ -51,28 +49,27 @@ static int mpi_size, mpi_rank;
*-------------------------------------------------------------------------
*/
static void
-filter_read_internal(const char *filename, hid_t dcpl,
- hsize_t *dset_size)
+filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
{
- hid_t file, dataset; /* HDF5 IDs */
- hid_t access_plist; /* Access property list ID */
- hid_t sid, memspace; /* Dataspace IDs */
- hsize_t size[2]; /* Dataspace dimensions */
- hsize_t hs_offset[2]; /* Hyperslab offset */
- hsize_t hs_size[2]; /* Hyperslab size */
- size_t i, j; /* Local index variables */
- char name[32] = "dataset";
- herr_t hrc; /* Error status */
- int *points = NULL; /* Writing buffer for entire dataset */
- int *check = NULL; /* Reading buffer for selected hyperslab */
+ hid_t file, dataset; /* HDF5 IDs */
+ hid_t access_plist; /* Access property list ID */
+ hid_t sid, memspace; /* Dataspace IDs */
+ hsize_t size[2]; /* Dataspace dimensions */
+ hsize_t hs_offset[2]; /* Hyperslab offset */
+ hsize_t hs_size[2]; /* Hyperslab size */
+ size_t i, j; /* Local index variables */
+ char name[32] = "dataset";
+ herr_t hrc; /* Error status */
+ int * points = NULL; /* Writing buffer for entire dataset */
+ int * check = NULL; /* Reading buffer for selected hyperslab */
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set sizes for dataset and hyperslabs */
hs_size[0] = size[0] = HS_DIM1;
- hs_size[1] = HS_DIM2;
+ hs_size[1] = HS_DIM2;
size[1] = hs_size[1] * (hsize_t)mpi_size;
@@ -81,50 +78,50 @@ filter_read_internal(const char *filename, hid_t dcpl,
/* Create the data space */
sid = H5Screate_simple(2, size, NULL);
- VRFY(sid>=0, "H5Screate_simple");
+ VRFY(sid >= 0, "H5Screate_simple");
/* Create buffers */
points = (int *)HDmalloc(size[0] * size[1] * sizeof(int));
- VRFY(points!=NULL, "HDmalloc");
+ VRFY(points != NULL, "HDmalloc");
check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int));
- VRFY(check!=NULL, "HDmalloc");
+ VRFY(check != NULL, "HDmalloc");
/* Initialize writing buffer with random data */
- for(i = 0; i < size[0]; i++)
- for(j = 0; j < size[1]; j++)
- points[i * size[1]+j] = (int)(i+j+7);
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ points[i * size[1] + j] = (int)(i + j + 7);
VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability");
/* Serial write phase */
- if(MAINPROCESS) {
+ if (MAINPROCESS) {
file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY(file>=0, "H5Fcreate");
+ VRFY(file >= 0, "H5Fcreate");
/* Create the dataset */
dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY(dataset>=0, "H5Dcreate2");
+ VRFY(dataset >= 0, "H5Dcreate2");
hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points);
- VRFY(hrc>=0, "H5Dwrite");
+ VRFY(hrc >= 0, "H5Dwrite");
*dset_size = H5Dget_storage_size(dataset);
- VRFY(*dset_size>0, "H5Dget_storage_size");
+ VRFY(*dset_size > 0, "H5Dget_storage_size");
- hrc = H5Dclose (dataset);
- VRFY(hrc>=0, "H5Dclose");
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
- hrc = H5Fclose (file);
- VRFY(hrc>=0, "H5Fclose");
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
}
MPI_Barrier(MPI_COMM_WORLD);
/* Parallel read phase */
/* Set up MPIO file access property lists */
- access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "H5Pcreate");
hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
@@ -138,51 +135,48 @@ filter_read_internal(const char *filename, hid_t dcpl,
VRFY((dataset >= 0), "H5Dopen2");
hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
- VRFY(hrc>=0, "H5Sselect_hyperslab");
+ VRFY(hrc >= 0, "H5Sselect_hyperslab");
memspace = H5Screate_simple(2, hs_size, NULL);
- VRFY(memspace>=0, "H5Screate_simple");
+ VRFY(memspace >= 0, "H5Screate_simple");
- hrc = H5Dread (dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check);
- VRFY(hrc>=0, "H5Dread");
+ hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check);
+ VRFY(hrc >= 0, "H5Dread");
/* Check that the values read are the same as the values written */
- for (i=0; i<hs_size[0]; i++) {
- for (j=0; j<hs_size[1]; j++) {
- if(points[i*size[1]+(size_t)hs_offset[1]+j] !=
- check[i*hs_size[1]+j]) {
- HDfprintf(stderr," Read different values than written.\n");
- HDfprintf(stderr," At index %lu,%lu\n",
- (unsigned long)(i),
- (unsigned long)(hs_offset[1]+j));
- HDfprintf(stderr," At original: %d\n",
- (int)points[i*size[1]+(size_t)hs_offset[1]+j]);
- HDfprintf(stderr," At returned: %d\n",
- (int)check[i*hs_size[1]+j]);
- VRFY(FALSE, "");
+ for (i = 0; i < hs_size[0]; i++) {
+ for (j = 0; j < hs_size[1]; j++) {
+ if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) {
+ HDfprintf(stderr, " Read different values than written.\n");
+ HDfprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i),
+ (unsigned long)(hs_offset[1] + j));
+ HDfprintf(stderr, " At original: %d\n",
+ (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
+ HDfprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
+ VRFY(FALSE, "");
+ }
}
}
- }
/* Get the storage size of the dataset */
- *dset_size=H5Dget_storage_size(dataset);
- VRFY(*dset_size!=0, "H5Dget_storage_size");
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size != 0, "H5Dget_storage_size");
/* Clean up objects used for this test */
- hrc = H5Dclose (dataset);
- VRFY(hrc>=0, "H5Dclose");
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
- hrc = H5Sclose (sid);
- VRFY(hrc>=0, "H5Sclose");
+ hrc = H5Sclose(sid);
+ VRFY(hrc >= 0, "H5Sclose");
- hrc = H5Sclose (memspace);
- VRFY(hrc>=0, "H5Sclose");
+ hrc = H5Sclose(memspace);
+ VRFY(hrc >= 0, "H5Sclose");
- hrc = H5Pclose (access_plist);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(access_plist);
+ VRFY(hrc >= 0, "H5Pclose");
- hrc = H5Fclose (file);
- VRFY(hrc>=0, "H5Fclose");
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
HDfree(points);
HDfree(check);
@@ -190,7 +184,6 @@ filter_read_internal(const char *filename, hid_t dcpl,
MPI_Barrier(MPI_COMM_WORLD);
}
-
/*-------------------------------------------------------------------------
* Function: test_filter_read
*
@@ -208,36 +201,36 @@ filter_read_internal(const char *filename, hid_t dcpl,
void
test_filter_read(void)
{
- hid_t dc; /* HDF5 IDs */
- const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
- hsize_t null_size; /* Size of dataset without filters */
- unsigned chunk_opts; /* Chunk options */
- unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- herr_t hrc;
- const char *filename;
+ hid_t dc; /* HDF5 IDs */
+ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
+ hsize_t null_size; /* Size of dataset without filters */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ herr_t hrc;
+ const char * filename;
#ifdef H5_HAVE_FILTER_FLETCHER32
- hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+ hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
#endif
#ifdef H5_HAVE_FILTER_DEFLATE
- hsize_t deflate_size; /* Size of dataset with deflate filter */
-#endif /* H5_HAVE_FILTER_DEFLATE */
+ hsize_t deflate_size; /* Size of dataset with deflate filter */
+#endif /* H5_HAVE_FILTER_DEFLATE */
#ifdef H5_HAVE_FILTER_SZIP
- hsize_t szip_size; /* Size of dataset with szip filter */
- unsigned szip_options_mask=H5_SZIP_NN_OPTION_MASK;
- unsigned szip_pixels_per_block=4;
+ hsize_t szip_size; /* Size of dataset with szip filter */
+ unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK;
+ unsigned szip_pixels_per_block = 4;
#endif /* H5_HAVE_FILTER_SZIP */
- hsize_t shuffle_size; /* Size of dataset with shuffle filter */
+ hsize_t shuffle_size; /* Size of dataset with shuffle filter */
-#if(defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
- hsize_t combo_size; /* Size of dataset with multiple filters */
-#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */
+#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
+ hsize_t combo_size; /* Size of dataset with multiple filters */
+#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */
filename = GetTestParameters();
- if(VERBOSE_MED)
+ if (VERBOSE_MED)
HDprintf("Parallel reading of dataset written with filters %s\n", filename);
/*----------------------------------------------------------
@@ -245,116 +238,115 @@ test_filter_read(void)
*----------------------------------------------------------
*/
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- filter_read_internal(filename,dc,&null_size);
+ filter_read_internal(filename, dc, &null_size);
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0,"H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
/* Run steps 1-3 both with and without filters disabled on partial chunks */
- for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
- disable_partial_chunk_filters++) {
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
/* Set chunk options appropriately */
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_filter");
hrc = H5Pget_chunk_opts(dc, &chunk_opts);
- VRFY(hrc>=0,"H5Pget_chunk_opts");
+ VRFY(hrc >= 0, "H5Pget_chunk_opts");
- if(disable_partial_chunk_filters)
+ if (disable_partial_chunk_filters)
chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
- hrc = H5Pclose (dc);
- VRFY(hrc>=0,"H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
/*----------------------------------------------------------
- * STEP 1: Test Fletcher32 Checksum by itself.
- *----------------------------------------------------------
- */
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
#ifdef H5_HAVE_FILTER_FLETCHER32
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pset_filter");
+ VRFY(dc >= 0, "H5Pset_filter");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_filter");
- hrc = H5Pset_chunk_opts (dc, chunk_opts);
- VRFY(hrc>=0,"H5Pset_chunk_opts");
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
- hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL);
+ VRFY(hrc >= 0, "H5Pset_filter");
- filter_read_internal(filename,dc,&fletcher32_size);
- VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
+ filter_read_internal(filename, dc, &fletcher32_size);
+ VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
#endif /* H5_HAVE_FILTER_FLETCHER32 */
/*----------------------------------------------------------
- * STEP 2: Test deflation by itself.
- *----------------------------------------------------------
- */
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
#ifdef H5_HAVE_FILTER_DEFLATE
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_chunk_opts (dc, chunk_opts);
- VRFY(hrc>=0,"H5Pset_chunk_opts");
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
- filter_read_internal(filename,dc,&deflate_size);
+ filter_read_internal(filename, dc, &deflate_size);
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
#endif /* H5_HAVE_FILTER_DEFLATE */
/*----------------------------------------------------------
- * STEP 3: Test szip compression by itself.
- *----------------------------------------------------------
- */
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
#ifdef H5_HAVE_FILTER_SZIP
- if(h5_szip_can_encode() == 1) {
+ if (h5_szip_can_encode() == 1) {
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_chunk_opts (dc, chunk_opts);
- VRFY(hrc>=0,"H5Pset_chunk_opts");
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ VRFY(hrc >= 0, "H5Pset_szip");
- filter_read_internal(filename,dc,&szip_size);
+ filter_read_internal(filename, dc, &szip_size);
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
}
#endif /* H5_HAVE_FILTER_SZIP */
- } /* end for */
-
+ } /* end for */
/*----------------------------------------------------------
* STEP 4: Test shuffling by itself.
@@ -362,21 +354,20 @@ test_filter_read(void)
*/
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_shuffle (dc);
- VRFY(hrc>=0, "H5Pset_shuffle");
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
- filter_read_internal(filename,dc,&shuffle_size);
- VRFY(shuffle_size==null_size,"Shuffled size not the same as uncompressed size.");
+ filter_read_internal(filename, dc, &shuffle_size);
+ VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
-
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
/*----------------------------------------------------------
* STEP 5: Test shuffle + deflate + checksum in any order.
@@ -385,47 +376,47 @@ test_filter_read(void)
#ifdef H5_HAVE_FILTER_DEFLATE
/* Testing shuffle+deflate+checksum filters (checksum first) */
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_fletcher32 (dc);
- VRFY(hrc>=0, "H5Pset_fletcher32");
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
- hrc = H5Pset_shuffle (dc);
- VRFY(hrc>=0, "H5Pset_shuffle");
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename, dc, &combo_size);
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
/* Testing shuffle+deflate+checksum filters (checksum last) */
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_shuffle (dc);
- VRFY(hrc>=0, "H5Pset_shuffle");
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
- hrc = H5Pset_fletcher32 (dc);
- VRFY(hrc>=0, "H5Pset_fletcher32");
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename, dc, &combo_size);
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
#endif /* H5_HAVE_FILTER_DEFLATE */
@@ -437,54 +428,53 @@ test_filter_read(void)
/* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */
dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_fletcher32 (dc);
- VRFY(hrc>=0, "H5Pset_fletcher32");
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
- hrc = H5Pset_shuffle (dc);
- VRFY(hrc>=0, "H5Pset_shuffle");
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
/* Make sure encoding is enabled */
- if(h5_szip_can_encode() == 1) {
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ if (h5_szip_can_encode() == 1) {
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename, dc, &combo_size);
}
/* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
/* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
/* Make sure encoding is enabled */
- if(h5_szip_can_encode() == 1) {
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ if (h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
- hrc = H5Pset_shuffle (dc);
- VRFY(hrc>=0, "H5Pset_shuffle");
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
- hrc = H5Pset_fletcher32 (dc);
- VRFY(hrc>=0, "H5Pset_fletcher32");
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename, dc, &combo_size);
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
}
#endif /* H5_HAVE_FILTER_SZIP */
}
-
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 13f408d..5153bce 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -23,16 +23,13 @@
#include "t_filters_parallel.h"
-const char *FILENAME[] = {
- "t_filters_parallel",
- NULL
-};
-char filenames[1][256];
+const char *FILENAME[] = {"t_filters_parallel", NULL};
+char filenames[1][256];
int nerrors = 0;
size_t cur_filter_idx = 0;
-#define GZIP_INDEX 0
+#define GZIP_INDEX 0
#define FLETCHER32_INDEX 1
#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
@@ -40,19 +37,19 @@ size_t cur_filter_idx = 0;
/*
* Used to check if a filter is available before running a test.
*/
-#define CHECK_CUR_FILTER_AVAIL() \
-{ \
- htri_t filter_is_avail; \
- \
- if (cur_filter_idx == GZIP_INDEX) { \
- if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
- if (MAINPROCESS) { \
- HDputs(" - SKIPPED - Deflate filter not available"); \
- } \
- return; \
- } \
- } \
-}
+#define CHECK_CUR_FILTER_AVAIL() \
+ { \
+ htri_t filter_is_avail; \
+ \
+ if (cur_filter_idx == GZIP_INDEX) { \
+ if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
+ if (MAINPROCESS) { \
+ HDputs(" - SKIPPED - Deflate filter not available"); \
+ } \
+ return; \
+ } \
+ } \
+ }
static herr_t set_dcpl_filter(hid_t dcpl);
@@ -183,8 +180,8 @@ set_dcpl_filter(hid_t dcpl)
static void
test_write_one_chunk_filtered_dataset(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
@@ -197,7 +194,8 @@ test_write_one_chunk_filtered_dataset(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to one-chunk filtered dataset");
+ if (MAINPROCESS)
+ HDputs("Testing write to one-chunk filtered dataset");
CHECK_CUR_FILTER_AVAIL();
@@ -205,10 +203,9 @@ test_write_one_chunk_filtered_dataset(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -216,12 +213,12 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ dataset_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -233,14 +230,13 @@ test_write_one_chunk_filtered_dataset(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -249,18 +245,20 @@ test_write_one_chunk_filtered_dataset(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = 1;
- stride[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- stride[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- block[0] = sel_dims[0];
- block[1] = sel_dims[1];
- start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ block[0] = sel_dims[0];
+ block[1] = sel_dims[1];
+ start[0] = ((hsize_t)mpi_rank * sel_dims[0]);
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -269,55 +267,58 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS
- * (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
+ data_size = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS *
+ (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = ((C_DATATYPE) i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
- + ((C_DATATYPE) i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+ correct_buf[i] = ((C_DATATYPE)i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
+ ((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -340,8 +341,8 @@ test_write_one_chunk_filtered_dataset(void)
static void
test_write_filtered_dataset_no_overlap(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -354,7 +355,8 @@ test_write_filtered_dataset_no_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks");
CHECK_CUR_FILTER_AVAIL();
@@ -362,11 +364,10 @@ test_write_filtered_dataset_no_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -374,12 +375,12 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -392,13 +393,13 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -408,17 +409,20 @@ test_write_filtered_dataset_no_overlap(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
- start[1] = 0;
+ count[1] =
+ (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -427,57 +431,55 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((dset_id >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE) (
- (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -501,8 +503,8 @@ test_write_filtered_dataset_no_overlap(void)
static void
test_write_filtered_dataset_overlap(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -515,7 +517,8 @@ test_write_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to shared filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks");
CHECK_CUR_FILTER_AVAIL();
@@ -523,11 +526,10 @@ test_write_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -535,12 +537,12 @@ test_write_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
- sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -553,13 +555,13 @@ test_write_filtered_dataset_overlap(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -568,18 +570,20 @@ test_write_filtered_dataset_overlap(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
- count[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
- block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank * block[0];
- start[1] = 0;
+ count[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -588,57 +592,56 @@ test_write_filtered_dataset_overlap(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i] = (C_DATATYPE)(
+ (dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -664,8 +667,8 @@ test_write_filtered_dataset_overlap(void)
static void
test_write_filtered_dataset_single_no_selection(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -679,7 +682,8 @@ test_write_filtered_dataset_single_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to filtered chunks with a single process having no selection");
+ if (MAINPROCESS)
+ HDputs("Testing write to filtered chunks with a single process having no selection");
CHECK_CUR_FILTER_AVAIL();
@@ -687,11 +691,10 @@ test_write_filtered_dataset_single_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -699,12 +702,12 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
sel_dims[0] = sel_dims[1] = 0;
@@ -720,13 +723,13 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -736,17 +739,20 @@ test_write_filtered_dataset_single_no_selection(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank * (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
- start[1] = 0;
+ count[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS /
+ (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -758,62 +764,61 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
else
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE) (
- (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
/* Compute the correct offset into the buffer for the process having no selection and clear it */
- segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
- HDmemset(correct_buf + ((size_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
- 0, segment_length * sizeof(*data));
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
+ HDmemset(correct_buf +
+ ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(*data));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -840,8 +845,8 @@ test_write_filtered_dataset_single_no_selection(void)
static void
test_write_filtered_dataset_all_no_selection(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -850,7 +855,8 @@ test_write_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to filtered chunks with all processes having no selection");
+ if (MAINPROCESS)
+ HDputs("Testing write to filtered chunks with all processes having no selection");
CHECK_CUR_FILTER_AVAIL();
@@ -858,11 +864,10 @@ test_write_filtered_dataset_all_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -870,10 +875,10 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ dataset_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
sel_dims[0] = sel_dims[1] = 0;
filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
@@ -887,13 +892,13 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -905,47 +910,48 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -966,10 +972,10 @@ test_write_filtered_dataset_all_no_selection(void)
static void
test_write_filtered_dataset_point_selection(void)
{
- C_DATATYPE *data = NULL;
+ C_DATATYPE *data = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *read_buf = NULL;
- hsize_t *coords = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t * coords = NULL;
hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -978,7 +984,8 @@ test_write_filtered_dataset_point_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to filtered chunks with point selection");
+ if (MAINPROCESS)
+ HDputs("Testing write to filtered chunks with point selection");
CHECK_CUR_FILTER_AVAIL();
@@ -986,11 +993,10 @@ test_write_filtered_dataset_point_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -998,14 +1004,14 @@ test_write_filtered_dataset_point_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
-
- filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims,NULL);
+ dataset_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
@@ -1016,13 +1022,13 @@ test_write_filtered_dataset_point_selection(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1032,69 +1038,71 @@ test_write_filtered_dataset_point_selection(void)
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- num_points = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
- coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ num_points = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS *
+ (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size;
+ coords = (hsize_t *)HDcalloc(1, 2 * num_points * sizeof(*coords));
VRFY((NULL != coords), "Coords HDcalloc succeeded");
for (i = 0; i < num_points; i++)
for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
- (j > 0) ? (i % (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
- : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+ (j > 0) ? (i % (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t)mpi_rank +
+ ((hsize_t)mpi_size * (i / (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
- VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t ) num_points, (const hsize_t * ) coords) >= 0),
- "Point selection succeeded");
+ VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
+ "Point selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i] = (C_DATATYPE)(
+ (dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (coords) HDfree(coords);
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (coords)
+ HDfree(coords);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1119,8 +1127,8 @@ test_write_filtered_dataset_point_selection(void)
static void
test_write_filtered_dataset_interleaved_write(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
@@ -1133,7 +1141,8 @@ test_write_filtered_dataset_interleaved_write(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing interleaved write to filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing interleaved write to filtered chunks");
CHECK_CUR_FILTER_AVAIL();
@@ -1141,11 +1150,10 @@ test_write_filtered_dataset_interleaved_write(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1153,12 +1161,12 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NROWS;
- dataset_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
- chunk_dims[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
- sel_dims[0] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / mpi_size);
- sel_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
+ dataset_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
+ chunk_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ sel_dims[0] = (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
filespace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -1171,13 +1179,13 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1186,18 +1194,22 @@ test_write_filtered_dataset_interleaved_write(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS);
- count[1] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS);
- stride[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
- stride[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
- block[0] = 1;
- block[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ count[0] =
+ (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS);
+ count[1] =
+ (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS);
+ stride[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1206,63 +1218,65 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
/* Add Column Index */
correct_buf[i] =
- (C_DATATYPE) (
- (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+ (C_DATATYPE)((i % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+ /* Add the Row Index */
+ + ((i % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) /
+ (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))
- );
+ /* Add the amount that gets added when a rank moves down to its next section
+ vertically in the dataset */
+ + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS *
+ (i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1284,8 +1298,8 @@ test_write_filtered_dataset_interleaved_write(void)
static void
test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
@@ -1298,7 +1312,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
@@ -1306,11 +1321,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1318,20 +1332,20 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
- chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
- sel_dims[2] = 1;
-
- filespace = H5Screate_simple( WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[2] = 1;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple( WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
@@ -1339,13 +1353,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1354,22 +1368,28 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- count[2] = 1;
- stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS /
+ (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS /
+ (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[2] = 1;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- block[2] = 1;
- start[0] = 0;
- start[1] = 0;
- start[2] = (hsize_t) mpi_rank;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
+ start[2], block[0], block[1], block[2]);
HDfflush(stdout);
}
@@ -1378,53 +1398,54 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
+ correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1447,8 +1468,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
static void
test_write_3d_filtered_dataset_no_overlap_same_pages(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
@@ -1461,7 +1482,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
hid_t file_id, dset_id, plist_id;
hid_t filespace, memspace;
- if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
@@ -1469,11 +1491,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1481,17 +1502,18 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
- chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- sel_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
-
- filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ filespace =
+ H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL);
@@ -1502,13 +1524,13 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1518,21 +1540,26 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
* it to the hyperslab in the file
*/
count[0] = 1;
- count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- count[2] = (hsize_t) mpi_size;
- stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS /
+ (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[2] = (hsize_t)mpi_size;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- block[2] = 1;
- start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
start[1] = 0;
start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
+ start[2], block[0], block[1], block[2]);
HDfflush(stdout);
}
@@ -1541,56 +1568,55 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (i % (dataset_dims[0] * dataset_dims[1]))
- + (i / (dataset_dims[0] * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
+ (i / (dataset_dims[0] * dataset_dims[1])));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1613,8 +1639,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
static void
test_write_3d_filtered_dataset_overlap(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
@@ -1627,7 +1653,8 @@ test_write_3d_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in 3D dataset");
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
@@ -1635,11 +1662,10 @@ test_write_3d_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1647,15 +1673,15 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
- dataset_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
- chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
- chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
- sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
- sel_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -1668,13 +1694,13 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1683,22 +1709,26 @@ test_write_3d_filtered_dataset_overlap(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
- count[1] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
- count[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
- stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ count[0] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
stride[2] = 1;
- block[0] = 1;
- block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
- block[2] = 1;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
- start[2] = 0;
+ block[0] = 1;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
+ start[2], block[0], block[1], block[2]);
HDfflush(stdout);
}
@@ -1706,66 +1736,68 @@ test_write_3d_filtered_dataset_overlap(void)
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride,
- count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
/* Add the Column Index */
- correct_buf[i] =
- (C_DATATYPE) (
- (i % (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ correct_buf[i] = (C_DATATYPE)(
+ (i % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- / (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ /* Add the Row Index */
+ + ((i % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
+ (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)
- * (i / (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
- );
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the
+ dataset */
+ + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
+ (i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1787,8 +1819,8 @@ test_write_3d_filtered_dataset_overlap(void)
static void
test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
@@ -1801,7 +1833,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
@@ -1809,11 +1843,10 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1822,24 +1855,27 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
- chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace =
+ H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
@@ -1849,14 +1885,14 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -1865,18 +1901,20 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- start[0] = 0;
- start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -1885,69 +1923,61 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
VRFY((NULL != data), "HDcalloc succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short) GEN_DATA(i);
- data[i].field2 = (int) GEN_DATA(i);
- data[i].field3 = (long) GEN_DATA(i);
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
}
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
+ correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1970,8 +2000,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
static void
test_write_cmpd_filtered_dataset_no_conversion_shared(void)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
@@ -1984,7 +2014,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
hid_t file_id, dset_id, plist_id, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
@@ -1992,11 +2024,10 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2004,25 +2035,28 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace =
+ H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
@@ -2032,14 +2066,14 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -2048,18 +2082,20 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ block[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2068,72 +2104,67 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
VRFY((NULL != data), "HDcalloc succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short) GEN_DATA(i);
- data[i].field2 = (int) GEN_DATA(i);
- data[i].field3 = (long) GEN_DATA(i);
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
}
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i].field1 =
+ (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field2 =
+ (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field3 =
+ (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2161,8 +2192,8 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
static void
test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
@@ -2175,7 +2206,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
@@ -2183,11 +2216,10 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2196,24 +2228,27 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
- chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
@@ -2223,25 +2258,22 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -2250,18 +2282,20 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- start[0] = 0;
- start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2270,55 +2304,59 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
VRFY((NULL != data), "HDcalloc succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short) GEN_DATA(i);
- data[i].field2 = (int) GEN_DATA(i);
- data[i].field3 = (long) GEN_DATA(i);
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
/* Ensure that this test currently fails since type conversions break collective mode */
- H5E_BEGIN_TRY {
+ H5E_BEGIN_TRY
+ {
VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
- } H5E_END_TRY;
+ "Dataset write succeeded");
+ }
+ H5E_END_TRY;
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
/* Verify that no data was written */
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2347,8 +2385,8 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
static void
test_write_cmpd_filtered_dataset_type_conversion_shared(void)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
@@ -2361,7 +2399,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
hid_t file_id, dset_id, plist_id, filetype, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS)
+ HDputs(
+ "Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
CHECK_CUR_FILTER_AVAIL();
@@ -2369,11 +2409,10 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2381,25 +2420,28 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
- chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
-
- filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ memspace =
+ H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
@@ -2409,25 +2451,22 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -2436,18 +2475,20 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ block[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -2456,55 +2497,59 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
- data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
VRFY((NULL != data), "HDcalloc succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
/* Fill data buffer */
- for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short) GEN_DATA(i);
- data[i].field2 = (int) GEN_DATA(i);
- data[i].field3 = (long) GEN_DATA(i);
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
}
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
/* Ensure that this test currently fails since type conversions break collective mode */
- H5E_BEGIN_TRY {
+ H5E_BEGIN_TRY
+ {
VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
- } H5E_END_TRY;
+ "Dataset write succeeded");
+ }
+ H5E_END_TRY;
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
/* Verify that no data was written */
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2533,9 +2578,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
static void
test_read_one_chunk_filtered_dataset(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
@@ -2547,32 +2592,35 @@ test_read_one_chunk_filtered_dataset(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from one-chunk filtered dataset");
+ if (MAINPROCESS)
+ HDputs("Testing read from one-chunk filtered dataset");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
- dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = ((C_DATATYPE) i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
- + ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+ correct_buf[i] = ((C_DATATYPE)i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
+ ((C_DATATYPE)i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2584,27 +2632,27 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2614,11 +2662,10 @@ test_read_one_chunk_filtered_dataset(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2628,8 +2675,8 @@ test_read_one_chunk_filtered_dataset(void)
dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -2645,66 +2692,72 @@ test_read_one_chunk_filtered_dataset(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = 1;
- count[1] = 1;
- stride[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- stride[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- block[0] = sel_dims[0];
- block[1] = sel_dims[1];
- start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ block[0] = sel_dims[0];
+ block[1] = sel_dims[1];
+ start[0] = ((hsize_t)mpi_rank * sel_dims[0]);
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) flat_dims[0];
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)flat_dims[0];
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0]);
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2730,9 +2783,9 @@ test_read_one_chunk_filtered_dataset(void)
static void
test_read_filtered_dataset_no_overlap(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -2744,34 +2797,34 @@ test_read_filtered_dataset_no_overlap(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared filtered chunks");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+ correct_buf_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS *
+ (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2783,27 +2836,27 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2813,11 +2866,10 @@ test_read_filtered_dataset_no_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2827,8 +2879,8 @@ test_read_filtered_dataset_no_overlap(void)
dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -2845,65 +2897,71 @@ test_read_filtered_dataset_no_overlap(void)
* it to the selection in memory
*/
count[0] = 1;
- count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
- start[1] = 0;
+ count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) flat_dims[0];
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)flat_dims[0];
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0]);
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2930,9 +2988,9 @@ test_read_filtered_dataset_no_overlap(void)
static void
test_read_filtered_dataset_overlap(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -2944,35 +3002,34 @@ test_read_filtered_dataset_overlap(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from shared filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing read from shared filtered chunks");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i] = (C_DATATYPE)(
+ (dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2984,27 +3041,27 @@ test_read_filtered_dataset_overlap(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3014,11 +3071,10 @@ test_read_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3028,8 +3084,8 @@ test_read_filtered_dataset_overlap(void)
dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
- sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -3045,40 +3101,41 @@ test_read_filtered_dataset_overlap(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
- count[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
- block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank * block[0];
- start[1] = 0;
+ count[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/*
@@ -3088,38 +3145,44 @@ test_read_filtered_dataset_overlap(void)
* of chunks in the first dimension of the dataset.
*/
{
- size_t loop_count = count[0];
+ size_t loop_count = count[0];
size_t total_recvcounts = 0;
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++) {
- recvcounts[i] = (int) dataset_dims[1];
- total_recvcounts += (size_t) recvcounts[i];
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[i];
}
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * dataset_dims[1]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * dataset_dims[1]);
for (; loop_count; loop_count--) {
- VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]],
+ recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
}
}
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3146,9 +3209,9 @@ test_read_filtered_dataset_overlap(void)
static void
test_read_filtered_dataset_single_no_selection(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -3161,40 +3224,38 @@ test_read_filtered_dataset_single_no_selection(void)
size_t segment_length;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from filtered chunks with a single process having no selection");
+ if (MAINPROCESS)
+ HDputs("Testing read from filtered chunks with a single process having no selection");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE) (
- (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
/* Compute the correct offset into the buffer for the process having no selection and clear it */
- segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
- HDmemset(correct_buf + ((size_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
- 0, segment_length * sizeof(*correct_buf));
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
+ HDmemset(correct_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(*correct_buf));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3202,31 +3263,32 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ filespace =
+ H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3236,11 +3298,10 @@ test_read_filtered_dataset_single_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3250,8 +3311,8 @@ test_read_filtered_dataset_single_no_selection(void)
dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
sel_dims[0] = sel_dims[1] = 0;
@@ -3271,17 +3332,20 @@ test_read_filtered_dataset_single_no_selection(void)
* reads it to the selection in memory
*/
count[0] = 1;
- count[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank * (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
- start[1] = 0;
+ count[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS /
+ (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -3289,55 +3353,62 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
else
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS *
+ READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS);
recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0;
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * (size_t) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS *
+ READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs,
+ C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
else
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
-
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
-
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3365,7 +3436,7 @@ test_read_filtered_dataset_single_no_selection(void)
static void
test_read_filtered_dataset_all_no_selection(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -3374,17 +3445,18 @@ test_read_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing read from filtered chunks with all processes having no selection");
+ if (MAINPROCESS)
+ HDputs("Testing read from filtered chunks with all processes having no selection");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
if (MAINPROCESS) {
@@ -3392,7 +3464,7 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3404,27 +3476,27 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3434,11 +3506,10 @@ test_read_filtered_dataset_all_no_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3463,19 +3534,20 @@ test_read_filtered_dataset_all_no_selection(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3503,9 +3575,9 @@ static void
test_read_filtered_dataset_point_selection(void)
{
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *global_buf = NULL;
- hsize_t *coords = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t * coords = NULL;
hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -3514,35 +3586,34 @@ test_read_filtered_dataset_point_selection(void)
size_t num_points;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from filtered chunks with point selection");
+ if (MAINPROCESS)
+ HDputs("Testing read from filtered chunks with point selection");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i] = (C_DATATYPE)(
+ (dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3554,27 +3625,27 @@ test_read_filtered_dataset_point_selection(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3584,11 +3655,10 @@ test_read_filtered_dataset_point_selection(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3598,8 +3668,8 @@ test_read_filtered_dataset_point_selection(void)
dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+ sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -3611,35 +3681,36 @@ test_read_filtered_dataset_point_selection(void)
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- num_points = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
- coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ num_points = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS *
+ (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size;
+ coords = (hsize_t *)HDcalloc(1, 2 * num_points * sizeof(*coords));
VRFY((NULL != coords), "Coords HDcalloc succeeded");
for (i = 0; i < num_points; i++)
for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
- (j > 0) ? (i % (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
- : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+ (j > 0) ? (i % (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t)mpi_rank +
+ ((hsize_t)mpi_size * (i / (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
- VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t ) num_points, (const hsize_t * ) coords) >= 0),
- "Point selection succeeded");
+ VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
+ "Point selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/*
@@ -3649,39 +3720,46 @@ test_read_filtered_dataset_point_selection(void)
* of chunks in the first dimension of the dataset.
*/
{
- size_t original_loop_count = dataset_dims[0] / (hsize_t) mpi_size;
- size_t cur_loop_count = original_loop_count;
- size_t total_recvcounts = 0;
+ size_t original_loop_count = dataset_dims[0] / (hsize_t)mpi_size;
+ size_t cur_loop_count = original_loop_count;
+ size_t total_recvcounts = 0;
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++) {
- recvcounts[i] = (int) dataset_dims[1];
- total_recvcounts += (size_t) recvcounts[i];
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[i];
}
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * dataset_dims[1]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * dataset_dims[1]);
for (; cur_loop_count; cur_loop_count--) {
- VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS ==
+ MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]],
+ recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
}
}
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
HDfree(coords);
@@ -3713,9 +3791,9 @@ test_read_filtered_dataset_point_selection(void)
static void
test_read_filtered_dataset_interleaved_read(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
@@ -3727,41 +3805,43 @@ test_read_filtered_dataset_interleaved_read(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing interleaved read from filtered chunks");
+ if (MAINPROCESS)
+ HDputs("Testing interleaved read from filtered chunks");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS;
- dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+ dataset_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
/* Add Column Index */
correct_buf[i] =
- (C_DATATYPE) (
- (i % (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+ (C_DATATYPE)((i % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+ /* Add the Row Index */
+ + ((i % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) /
+ (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))
- );
+ /* Add the amount that gets added when a rank moves down to its next section
+ vertically in the dataset */
+ + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS *
+ (i / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS))));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3773,27 +3853,27 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3803,11 +3883,10 @@ test_read_filtered_dataset_interleaved_read(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3815,10 +3894,10 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
- sel_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+ sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -3834,40 +3913,43 @@ test_read_filtered_dataset_interleaved_read(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS);
- count[1] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS);
- stride[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
- stride[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
- block[0] = 1;
- block[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ count[0] =
+ (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS);
+ count[1] =
+ (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS);
+ stride[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/*
@@ -3877,38 +3959,44 @@ test_read_filtered_dataset_interleaved_read(void)
* of chunks in the first dimension of the dataset.
*/
{
- size_t loop_count = count[0];
+ size_t loop_count = count[0];
size_t total_recvcounts = 0;
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++) {
- recvcounts[i] = (int) dataset_dims[1];
- total_recvcounts += (size_t) recvcounts[i];
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[i];
}
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * dataset_dims[1]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * dataset_dims[1]);
for (; loop_count; loop_count--) {
- VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]],
+ recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
}
}
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3935,46 +4023,47 @@ test_read_filtered_dataset_interleaved_read(void)
static void
test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
{
- MPI_Datatype vector_type;
- MPI_Datatype resized_vector_type;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
-
- if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE * read_buf = NULL;
+ C_DATATYPE * correct_buf = NULL;
+ C_DATATYPE * global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+ dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
+ correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -3982,32 +4071,34 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ filespace =
+ H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY(
+ (H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -4017,11 +4108,10 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4031,8 +4121,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
- sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
sel_dims[2] = 1;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
@@ -4049,52 +4139,55 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- count[2] = 1;
- stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS /
+ (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS /
+ (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[2] = 1;
+ stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- block[2] = 1;
- start[0] = 0;
- start[1] = 0;
- start[2] = (hsize_t) mpi_rank;
+ block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t)mpi_rank;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/*
* Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
* rank to write to the nth position of the global data buffer, where n is the rank number.
*/
- VRFY((MPI_SUCCESS == MPI_Type_vector((int) flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
- "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
/*
@@ -4102,21 +4195,24 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
* so make it only one MPI_LONG wide
*/
VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)),
- "MPI_Type_create_resized");
+ "MPI_Type_create_resized");
VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
- VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
- "MPI_Allgather succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm)),
+ "MPI_Allgather succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -4144,9 +4240,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
static void
test_read_3d_filtered_dataset_no_overlap_same_pages(void)
{
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
@@ -4158,35 +4254,34 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id, dset_id, plist_id;
hid_t filespace, memspace;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
- dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (i % (dataset_dims[0] * dataset_dims[1]))
- + (i / (dataset_dims[0] * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
+ (i / (dataset_dims[0] * dataset_dims[1])));
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4194,32 +4289,34 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ filespace =
+ H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >=
+ 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -4229,11 +4326,10 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4243,9 +4339,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
- sel_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
@@ -4262,69 +4358,76 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
* reads it to the selection in memory
*/
count[0] = 1;
- count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- count[2] = (hsize_t) mpi_size;
- stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS /
+ (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[2] = (hsize_t)mpi_size;
+ stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- block[2] = 1;
- start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
- start[1] = 0;
- start[2] = 0;
+ block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ start[1] = 0;
+ start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) flat_dims[0];
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)flat_dims[0];
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0]);
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0]);
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
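The hunks above reformat the per-rank read sequence used throughout these tests: select a hyperslab in the file dataspace, switch the transfer property list to collective MPI-IO, then call H5Dread into a flat buffer. A condensed sketch of that sequence (not from the commit) is shown below; the handles, memory datatype, and selection arrays are passed in as placeholders for the test's own values, and error handling is trimmed.

#include "hdf5.h"

/*
 * Select this rank's hyperslab and read it collectively into read_buf.
 * All ids, the selection arrays and the memory datatype come from the caller.
 */
static herr_t
read_my_hyperslab(hid_t dset_id, hid_t mem_type_id, hid_t memspace, hid_t filespace,
                  const hsize_t start[], const hsize_t stride[],
                  const hsize_t count[], const hsize_t block[], void *read_buf)
{
    hid_t  dxpl_id;
    herr_t status;

    /* Restrict the file dataspace to the piece this rank owns */
    if (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) < 0)
        return -1;

    /* The tests use collective MPI-IO transfers when reading filtered chunks */
    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);

    status = H5Dread(dset_id, mem_type_id, memspace, filespace, dxpl_id, read_buf);

    H5Pclose(dxpl_id);
    return status;
}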
@@ -4352,58 +4455,60 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
static void
test_read_3d_filtered_dataset_overlap(void)
{
- MPI_Datatype vector_type;
- MPI_Datatype resized_vector_type;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
-
- if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in 3D dataset");
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE * read_buf = NULL;
+ C_DATATYPE * correct_buf = NULL;
+ C_DATATYPE * global_buf = NULL;
+ hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS)
+ HDputs("Testing read from shared filtered chunks in 3D dataset");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
- dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
- dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
/* Add the Column Index */
- correct_buf[i] =
- (C_DATATYPE) (
- (i % (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ correct_buf[i] = (C_DATATYPE)(
+ (i % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the Row Index */
- + ((i % (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- / (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ /* Add the Row Index */
+ + ((i % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
+ (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
- /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
- + ((hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)
- * (i / (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
- );
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the
+ dataset */
+ + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
+ (i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
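The expected-value expression above remains hard to read even after re-wrapping. Restated with named intermediate terms (an editorial illustration only, assuming the same row-major NROWS x NCOLS x DEPTH layout whose rows are interleaved one-per-rank across mpi_size ranks):

#include <stddef.h>

/* Expected value of element i for the interleaved 3D layout (illustration only). */
static long
expected_shared_3d_value(size_t i, size_t ncols, size_t depth, int mpi_size)
{
    size_t row_len  = ncols * depth;              /* elements in one row of the dataset */
    size_t band_len = (size_t)mpi_size * row_len; /* one row from each rank */

    size_t col_term  = i % row_len;               /* the "Column Index" term above */
    size_t row_term  = (i % band_len) / row_len;  /* the "Row Index" term above */
    size_t band_term = row_len * (i / band_len);  /* the per-band vertical offset term above */

    return (long)(col_term + row_term + band_term);
}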
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4415,28 +4520,28 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
chunk_dims[2] = 1;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -4446,11 +4551,10 @@ test_read_3d_filtered_dataset_overlap(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4460,9 +4564,9 @@ test_read_3d_filtered_dataset_overlap(void)
dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
- sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
- sel_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
@@ -4478,79 +4582,87 @@ test_read_3d_filtered_dataset_overlap(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
- count[1] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
- count[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
- stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ count[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
stride[2] = 1;
- block[0] = 1;
- block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
- block[2] = 1;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
- start[2] = 0;
+ block[0] = 1;
+ block[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
{
- size_t run_length = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
- size_t num_blocks = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ size_t run_length =
+ (size_t)(READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
+ size_t num_blocks = (size_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
/*
* Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
* rank to write to the nth position of the global data buffer, where n is the rank number.
*/
- VRFY((MPI_SUCCESS == MPI_Type_vector((int) num_blocks, (int) run_length, (int) (mpi_size * (int) run_length), C_DATATYPE_MPI, &vector_type)),
- "MPI_Type_vector succeeded");
+ VRFY(
+ (MPI_SUCCESS == MPI_Type_vector((int)num_blocks, (int)run_length,
+ (int)(mpi_size * (int)run_length), C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
/*
* Resize the type to allow interleaving,
* so make it "run_length" MPI_LONGs wide
*/
- VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint) (run_length * sizeof(long)), &resized_vector_type)),
- "MPI_Type_create_resized");
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
+ &resized_vector_type)),
+ "MPI_Type_create_resized");
VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
}
- VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
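The block above builds the interleaving trick the 3D shared-chunk tests rely on: a vector type whose stride skips the other ranks' runs, resized so its extent is a single run, which lets MPI_Allgather place rank r's runs at positions r, r + mpi_size, r + 2*mpi_size, ... of the global buffer. A self-contained sketch of that idea follows (not from the commit), using plain long / MPI_LONG as stand-ins for C_DATATYPE / C_DATATYPE_MPI and omitting error checks.

#include "mpi.h"

/*
 * Gather num_blocks runs of run_length longs from every rank into an
 * interleaved global buffer (rank 0's first run, rank 1's first run, ...).
 */
static void
interleaved_allgather(const long *local, long *global, int num_blocks, int run_length, MPI_Comm comm)
{
    MPI_Datatype vec, interleaved;
    int          mpi_size;

    MPI_Comm_size(comm, &mpi_size);

    /* num_blocks runs, each run_length long, skipping the other ranks' runs in between */
    MPI_Type_vector(num_blocks, run_length, mpi_size * run_length, MPI_LONG, &vec);

    /* Shrink the extent so successive ranks land run_length elements apart */
    MPI_Type_create_resized(vec, 0, (MPI_Aint)((size_t)run_length * sizeof(long)), &interleaved);
    MPI_Type_commit(&interleaved);

    /* Send plain longs; receive one resized vector element from every rank */
    MPI_Allgather(local, num_blocks * run_length, MPI_LONG, global, 1, interleaved, comm);

    MPI_Type_free(&vec);
    MPI_Type_free(&interleaved);
}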
@@ -4577,9 +4689,9 @@ test_read_3d_filtered_dataset_overlap(void)
static void
test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
@@ -4591,37 +4703,30 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
+ correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
}
/* Create the compound type for memory. */
@@ -4629,18 +4734,18 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4648,7 +4753,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
@@ -4658,21 +4764,22 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -4682,22 +4789,22 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -4713,66 +4820,71 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = 1;
- count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ count[0] = 1;
+ count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
stride[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- start[0] = 0;
- start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
- global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
+ global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
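For the compound-type tests, the gather above sidesteps MPI struct datatypes entirely: every rank reads the same number of elements, so the pieces are exchanged as raw bytes with per-rank byte counts and displacements. A sketch under that assumption is shown below (not from the commit), with cmpd_t standing in for COMPOUND_C_DATATYPE and allocation checks omitted.

#include <stdlib.h>
#include "mpi.h"

typedef struct {
    short field1;
    int   field2;
    long  field3;
} cmpd_t; /* stand-in for COMPOUND_C_DATATYPE */

/* Gather the same number of structs from every rank, treating them as raw bytes. */
static void
gather_compound(const cmpd_t *local, cmpd_t *global, size_t nelems_per_rank, MPI_Comm comm)
{
    int  mpi_size, nbytes, i;
    int *recvcounts, *displs;

    MPI_Comm_size(comm, &mpi_size);
    nbytes = (int)(nelems_per_rank * sizeof(cmpd_t));

    recvcounts = malloc((size_t)mpi_size * sizeof(*recvcounts));
    displs     = malloc((size_t)mpi_size * sizeof(*displs));
    for (i = 0; i < mpi_size; i++) {
        recvcounts[i] = nbytes;     /* every rank contributes the same byte count */
        displs[i]     = i * nbytes; /* rank i's piece starts i * nbytes into the buffer */
    }

    MPI_Allgatherv(local, nbytes, MPI_BYTE, global, recvcounts, displs, MPI_BYTE, comm);

    free(displs);
    free(recvcounts);
}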
@@ -4800,9 +4912,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
static void
test_read_cmpd_filtered_dataset_no_conversion_shared(void)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
@@ -4814,40 +4926,36 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id, dset_id, plist_id, memtype;
hid_t filespace, memspace;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i].field1 =
+ (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field2 =
+ (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field3 =
+ (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
}
/* Create the compound type for memory. */
@@ -4855,18 +4963,18 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -4874,31 +4982,33 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
+ memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -4908,22 +5018,22 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -4939,66 +5049,71 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = 1;
- count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ count[0] = 1;
+ count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ block[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
- global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
+ global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -5026,9 +5141,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
static void
test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
@@ -5040,37 +5155,30 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS)
+ HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype "
+ "conversion");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
- dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (i % dataset_dims[1])
- + (i / dataset_dims[1])
- );
+ correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+
+ correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
}
/* Create the compound type for memory. */
@@ -5078,29 +5186,26 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
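The two compound types created above are what exercise datatype conversion in these tests: the memory type mirrors the C struct via HOFFSET, while the file type stores every field as a 64-bit big-endian integer inside a 32-byte record. A sketch of the pair follows (an illustration, not code from the commit), with cmpd_t as a stand-in for COMPOUND_C_DATATYPE and no error checking.

#include "hdf5.h"

typedef struct {
    short field1;
    int   field2;
    long  field3;
} cmpd_t; /* stand-in for COMPOUND_C_DATATYPE */

/* Build the in-memory and on-disk compound types used by the conversion tests (sketch). */
static void
make_compound_types(hid_t *memtype_out, hid_t *filetype_out)
{
    hid_t memtype, filetype;

    /* Memory layout follows the struct's real offsets */
    memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_t));
    H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_t, field1), H5T_NATIVE_SHORT);
    H5Tinsert(memtype, "IntData", HOFFSET(cmpd_t, field2), H5T_NATIVE_INT);
    H5Tinsert(memtype, "LongData", HOFFSET(cmpd_t, field3), H5T_NATIVE_LONG);

    /* File layout: three 8-byte big-endian fields inside a 32-byte record */
    filetype = H5Tcreate(H5T_COMPOUND, 32);
    H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE);
    H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE);
    H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE);

    *memtype_out  = memtype;
    *filetype_out = filetype;
}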
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5108,7 +5213,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
@@ -5118,21 +5224,22 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5142,22 +5249,22 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -5173,66 +5280,71 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = 1;
- count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ count[0] = 1;
+ count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
stride[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- start[0] = 0;
- start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
- global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
+ global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -5261,9 +5373,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
static void
test_read_cmpd_filtered_dataset_type_conversion_shared(void)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
@@ -5275,40 +5387,36 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
size_t i, read_buf_size, correct_buf_size;
hid_t file_id, dset_id, plist_id, filetype, memtype;
hid_t filespace, memspace;
- int *recvcounts = NULL;
- int *displs = NULL;
+ int * recvcounts = NULL;
+ int * displs = NULL;
- if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS)
+ HDputs(
+ "Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
- dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+ dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field2 = (int) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
-
- correct_buf[i].field3 = (long) (
- (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
- + (i % dataset_dims[1])
- + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
- );
+ correct_buf[i].field1 =
+ (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field2 =
+ (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+
+ correct_buf[i].field3 =
+ (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
+ (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
}
/* Create the compound type for memory. */
@@ -5316,29 +5424,26 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((memtype >= 0), "Datatype creation succeeded");
VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
- "Datatype insertion succeeded");
+ "Datatype insertion succeeded");
/* Create the compound type for file. */
filetype = H5Tcreate(H5T_COMPOUND, 32);
VRFY((filetype >= 0), "Datatype creation succeeded");
- VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
- VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
- "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5346,31 +5451,33 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5380,22 +5487,22 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
@@ -5411,66 +5518,71 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
*/
- count[0] = 1;
- count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
- stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ count[0] = 1;
+ count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
- block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- start[0] = (hsize_t) mpi_rank;
- start[1] = 0;
+ block[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
read_buf_size = flat_dims[0] * sizeof(*read_buf);
- read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
- global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
- displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "HDcalloc succeeded");
- for (i = 0; i < (size_t) mpi_size; i++)
- displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+ for (i = 0; i < (size_t)mpi_size; i++)
+ displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
+ global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
- VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (displs) HDfree(displs);
- if (recvcounts) HDfree(recvcounts);
- if (global_buf) HDfree(global_buf);
- if (read_buf) HDfree(read_buf);
- if (correct_buf) HDfree(correct_buf);
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
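
The read-test hunks above all end with the same verification idiom: each rank reads its flattened hyperslab into read_buf, the ranks exchange those equally sized byte ranges with MPI_Allgatherv, and every rank then memcmps the reassembled image against correct_buf. A minimal standalone sketch of that idiom follows; the helper name gather_and_verify and its arguments are illustrative assumptions, not code from t_filters_parallel.c.

#include <mpi.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper, not part of the HDF5 test sources: gathers each rank's
 * equally sized chunk of read data and compares the reassembled buffer
 * against the expected contents on every rank. Returns nonzero on a match. */
static int
gather_and_verify(const void *read_buf, size_t bytes_per_rank, const void *correct_buf, MPI_Comm comm)
{
    void *global_buf = NULL;
    int  *recvcounts = NULL;
    int  *displs     = NULL;
    int   mpi_size   = 0;
    int   match      = 0;
    int   i;

    MPI_Comm_size(comm, &mpi_size);

    recvcounts = malloc((size_t)mpi_size * sizeof(*recvcounts));
    displs     = malloc((size_t)mpi_size * sizeof(*displs));
    global_buf = malloc((size_t)mpi_size * bytes_per_rank);
    if (!recvcounts || !displs || !global_buf)
        goto done;

    /* Every rank contributes the same number of bytes, laid out rank by rank */
    for (i = 0; i < mpi_size; i++) {
        recvcounts[i] = (int)bytes_per_rank;
        displs[i]     = (int)((size_t)i * bytes_per_rank);
    }

    /* Exchange the per-rank pieces so each rank holds the full dataset image */
    if (MPI_SUCCESS != MPI_Allgatherv(read_buf, (int)bytes_per_rank, MPI_BYTE, global_buf, recvcounts,
                                      displs, MPI_BYTE, comm))
        goto done;

    match = (0 == memcmp(global_buf, correct_buf, (size_t)mpi_size * bytes_per_rank));

done:
    free(global_buf);
    free(displs);
    free(recvcounts);

    return match;
}
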
@@ -5495,8 +5607,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
static void
test_write_serial_read_parallel(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
@@ -5504,13 +5616,14 @@ test_write_serial_read_parallel(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1;
- if (MAINPROCESS) HDputs("Testing write file serially; read file in parallel");
+ if (MAINPROCESS)
+ HDputs("Testing write file serially; read file in parallel");
CHECK_CUR_FILTER_AVAIL();
- dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS;
- dataset_dims[2] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_DEPTH;
+ dataset_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS;
+ dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH;
/* Write the file on the MAINPROCESS rank */
if (MAINPROCESS) {
@@ -5519,7 +5632,7 @@ test_write_serial_read_parallel(void)
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5527,8 +5640,8 @@ test_write_serial_read_parallel(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- chunk_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
chunk_dims[2] = 1;
filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL);
@@ -5539,13 +5652,13 @@ test_write_serial_read_parallel(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -5553,16 +5666,17 @@ test_write_serial_read_parallel(void)
data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5570,24 +5684,23 @@ test_write_serial_read_parallel(void)
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (long) i;
+ correct_buf[i] = (long)i;
/* All ranks open the file and verify their "portion" of the dataset is correct */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5600,17 +5713,17 @@ test_write_serial_read_parallel(void)
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
- if (correct_buf) HDfree(correct_buf);
- if (read_buf) HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
@@ -5633,8 +5746,8 @@ test_write_serial_read_parallel(void)
static void
test_write_parallel_read_serial(void)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
@@ -5647,7 +5760,8 @@ test_write_parallel_read_serial(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) HDputs("Testing write file in parallel; read serially");
+ if (MAINPROCESS)
+ HDputs("Testing write file in parallel; read serially");
CHECK_CUR_FILTER_AVAIL();
@@ -5655,11 +5769,10 @@ test_write_parallel_read_serial(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5667,15 +5780,15 @@ test_write_parallel_read_serial(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NROWS;
- dataset_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
- dataset_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
- chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- chunk_dims[2] = 1;
- sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
- sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
+ dataset_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS;
+ dataset_dims[2] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_DEPTH;
+ chunk_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS;
+ sel_dims[2] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_DEPTH;
filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -5688,13 +5801,13 @@ test_write_parallel_read_serial(void)
VRFY((plist_id >= 0), "DCPL creation succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -5703,22 +5816,26 @@ test_write_parallel_read_serial(void)
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- count[2] = (hsize_t) mpi_size;
- stride[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- stride[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ count[2] = (hsize_t)mpi_size;
+ stride[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
stride[2] = 1;
- block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
- block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
- block[2] = 1;
- offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]);
+ block[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ block[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ block[2] = 1;
+ offset[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]);
offset[1] = 0;
offset[2] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE
+ ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ " ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0],
+ offset[1], offset[2], block[0], block[1], block[2]);
HDfflush(stdout);
}
@@ -5726,29 +5843,29 @@ test_write_parallel_read_serial(void)
filespace = H5Dget_space(dset_id);
VRFY((filespace >= 0), "File dataspace retrieval succeeded");
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride,
- count, block) >= 0), "Hyperslab selection succeeded");
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
/* Fill data buffer */
data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
- data = (C_DATATYPE *) HDcalloc(1, data_size);
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE) GEN_DATA(i);
+ data[i] = (C_DATATYPE)GEN_DATA(i);
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -5761,7 +5878,7 @@ test_write_parallel_read_serial(void)
VRFY((plist_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5773,23 +5890,20 @@ test_write_parallel_read_serial(void)
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
- correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
- read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE) (
- (i % (dataset_dims[0] * dataset_dims[1]))
- + (i / (dataset_dims[0] * dataset_dims[1]))
- );
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
+ (i / (dataset_dims[0] * dataset_dims[1])));
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
- "Dataset read succeeded");
+ "Dataset read succeeded");
- VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
- "Data verification succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5813,19 +5927,20 @@ test_write_parallel_read_serial(void)
static void
test_shrinking_growing_chunks(void)
{
- double *data = NULL;
- hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
-
- if (MAINPROCESS) HDputs("Testing continually shrinking/growing chunks");
+ double *data = NULL;
+ hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS)
+ HDputs("Testing continually shrinking/growing chunks");
CHECK_CUR_FILTER_AVAIL();
@@ -5833,11 +5948,10 @@ test_shrinking_growing_chunks(void)
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
- "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -5845,12 +5959,12 @@ test_shrinking_growing_chunks(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
/* Create the dataspace for the dataset */
- dataset_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_NROWS;
- dataset_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
- chunk_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
- sel_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
- sel_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+ dataset_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS;
filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -5862,14 +5976,13 @@ test_shrinking_growing_chunks(void)
plist_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((plist_id >= 0), "DCPL creation succeeded");
- VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
@@ -5879,18 +5992,20 @@ test_shrinking_growing_chunks(void)
* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
- count[0] = 1;
- count[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
- stride[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
- stride[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
- block[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
- block[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
- start[0] = ((hsize_t) mpi_rank * (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
- start[1] = 0;
+ count[0] = 1;
+ count[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
if (VERBOSE_MED) {
- HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
HDfflush(stdout);
}
@@ -5899,18 +6014,17 @@ test_shrinking_growing_chunks(void)
    VRFY((filespace >= 0), "File dataspace retrieval succeeded");
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ "Hyperslab selection succeeded");
/* Create property list for collective dataset write */
plist_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((plist_id >= 0), "DXPL creation succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
- "Set DXPL MPIO succeeded");
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
- data = (double *) HDcalloc(1, data_size);
+ data = (double *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
@@ -5920,15 +6034,16 @@ test_shrinking_growing_chunks(void)
else {
size_t j;
for (j = 0; j < data_size / sizeof(*data); j++) {
- data[j] = (float) ( rand() / (double) (RAND_MAX / (double) 1.0L) );
+ data[j] = (float)(rand() / (double)(RAND_MAX / (double)1.0L));
}
}
VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ "Dataset write succeeded");
}
- if (data) HDfree(data);
+ if (data)
+ HDfree(data);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -5941,7 +6056,7 @@ test_shrinking_growing_chunks(void)
#endif
int
-main(int argc, char** argv)
+main(int argc, char **argv)
{
size_t i;
hid_t file_id = -1, fapl = -1;
@@ -5975,7 +6090,8 @@ main(int argc, char** argv)
HDprintf("==========================\n\n");
}
- if (VERBOSE_MED) h5_show_hostname();
+ if (VERBOSE_MED)
+ h5_show_hostname();
ALARM_ON;
@@ -5986,10 +6102,10 @@ main(int argc, char** argv)
VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL),
- "Test file name created");
+ "Test file name created");
file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((file_id >= 0), "Test file creation succeeded");
@@ -6001,7 +6117,8 @@ main(int argc, char** argv)
(*tests[i])();
}
else {
- if (MAINPROCESS) MESG("MPI_Barrier failed");
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
nerrors++;
}
}
@@ -6020,7 +6137,7 @@ main(int argc, char** argv)
VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ "Set libver bounds succeeded");
file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((file_id >= 0), "Test file creation succeeded");
@@ -6038,20 +6155,22 @@ main(int argc, char** argv)
(*tests[i])();
}
else {
- if (MAINPROCESS) MESG("MPI_Barrier failed");
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
nerrors++;
}
}
- if (nerrors) goto exit;
+ if (nerrors)
+ goto exit;
- if (MAINPROCESS) HDputs("All Parallel Filters tests passed\n");
+ if (MAINPROCESS)
+ HDputs("All Parallel Filters tests passed\n");
exit:
if (nerrors)
if (MAINPROCESS)
- HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
- nerrors > 1 ? "S" : "");
+ HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : "");
ALARM_OFF;
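
Most hunks in this file reformat the same parallel-HDF5 boilerplate: an MPI-IO file-access property list, a per-rank hyperslab selection, and a collective dataset-transfer property list. A minimal sketch of that pattern follows; the routine name, the one-int-per-rank layout, and the dataset name "example_dset" are assumptions for illustration, and the VRFY-style error checking used by the tests is omitted for brevity.

#include <hdf5.h>
#include <mpi.h>

/* Hypothetical standalone routine, not taken from t_filters_parallel.c */
static herr_t
collective_write_one_int_per_rank(const char *filename, MPI_Comm comm, MPI_Info info)
{
    hsize_t dims[1], start[1], count[1];
    hid_t   fapl, dxpl, file_id, dset_id, filespace, memspace;
    int     mpi_rank, mpi_size, value;
    herr_t  ret;

    MPI_Comm_rank(comm, &mpi_rank);
    MPI_Comm_size(comm, &mpi_size);
    value = mpi_rank; /* each rank writes its own rank number */

    /* File access property list: use the MPI-IO driver */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, comm, info);

    /* All ranks create the file and the dataset collectively */
    dims[0]   = (hsize_t)mpi_size;
    file_id   = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    filespace = H5Screate_simple(1, dims, NULL);
    dset_id   = H5Dcreate2(file_id, "example_dset", H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT,
                           H5P_DEFAULT);

    /* Each rank selects its single element in the file ... */
    start[0] = (hsize_t)mpi_rank;
    count[0] = 1;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);

    /* ... and describes the matching one-element buffer in memory */
    memspace = H5Screate_simple(1, count, NULL);

    /* Dataset transfer property list: collective MPI-IO */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, dxpl, &value);

    H5Pclose(dxpl);
    H5Sclose(memspace);
    H5Sclose(filespace);
    H5Dclose(dset_id);
    H5Fclose(file_id);
    H5Pclose(fapl);

    return ret;
}
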
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index 9543508..3804c09 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -32,10 +32,10 @@
/* Used to load other filters than GZIP */
/* #define DYNAMIC_FILTER */ /* Uncomment and define the fields below to use a dynamically loaded filter */
-#define FILTER_NUM_CDVALUES 1
-const unsigned int cd_values[FILTER_NUM_CDVALUES] = { 0 };
+#define FILTER_NUM_CDVALUES 1
+const unsigned int cd_values[FILTER_NUM_CDVALUES] = {0};
H5Z_filter_t filter_id;
-unsigned int flags = 0;
+unsigned int flags = 0;
size_t cd_nelmts = FILTER_NUM_CDVALUES;
/* Utility Macros */
@@ -49,13 +49,15 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
/* Macro used to generate data for datasets for later verification */
-#define GEN_DATA(i) INCREMENTAL_DATA(i)
+#define GEN_DATA(i) INCREMENTAL_DATA(i)
/* For experimental purposes only, will cause tests to fail data verification phase - JTH */
-/* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon selected mode */
+/* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon
+ selected mode */
-#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */
-#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
+#define INCREMENTAL_DATA(i) ((size_t)mpi_rank + i) /* Generates incremental test data */
+#define RANK_DATA(i) \
+ (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
#define DEFAULT_DEFLATE_LEVEL 6
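
For context on the GEN_DATA macros just above: the write tests expand GEN_DATA(i) once per element when filling their write buffers, so swapping INCREMENTAL_DATA for RANK_DATA tags every element with the writing rank (and, as the comment notes, breaks verification). A small self-contained sketch, using illustrative stand-in names rather than the header's own macros:

#include <stdlib.h>

/* Illustrative stand-ins, not the macros from this header */
static int mpi_rank; /* set via MPI_Comm_rank() in the real tests */

#define EXAMPLE_INCREMENTAL_DATA(i) ((size_t)mpi_rank + (i)) /* rank-offset ramp, like INCREMENTAL_DATA */
#define EXAMPLE_RANK_DATA(i)        (mpi_rank)               /* constant per rank, like RANK_DATA */
#define EXAMPLE_GEN_DATA(i)         EXAMPLE_INCREMENTAL_DATA(i)

/* Fill a write buffer the way the write tests do before H5Dwrite */
static long *
fill_example_buffer(size_t nelems)
{
    long  *buf = calloc(nelems, sizeof(*buf));
    size_t i;

    if (buf)
        for (i = 0; i < nelems; i++)
            buf[i] = (long)EXAMPLE_GEN_DATA(i);

    return buf;
}
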
@@ -64,18 +66,20 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
/* Struct type for the compound datatype filtered dataset tests */
typedef struct {
- short field1;
- int field2;
- long field3;
+ short field1;
+ int field2;
+ long field3;
} COMPOUND_C_DATATYPE;
/* Defines for the one-chunk filtered dataset write test */
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write"
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS
-#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write"
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS \
+ (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS \
+ (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS
/* Defines for the unshared filtered chunks write test */
#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_write"
@@ -94,55 +98,69 @@ typedef struct {
#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the filtered chunks write test where a process has no selection */
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS \
+ (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS \
+ (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
/* Defines for the filtered chunks write test where no process has a selection */
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write"
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write"
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS \
+ (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS \
+ (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered chunks write test with a point selection */
#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_write"
#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS \
+ (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS \
+ (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered dataset interleaved write test */
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write"
-#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
-#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
-#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
-#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write"
+#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
+#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS \
+ (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS \
+ (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the 3D unshared filtered dataset separate page write test */
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \
+ "3D_unshared_filtered_chunks_separate_pages_write"
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS \
+ (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS \
+ (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
/* Defines for the 3D unshared filtered dataset same page write test */
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME \
+ "3D_unshared_filtered_chunks_same_pages_write"
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
-#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS \
+ (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS \
+ (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
/* Defines for the 3d shared filtered dataset write test */
#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_write"
@@ -154,48 +172,58 @@ typedef struct {
#define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
/* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_write"
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME \
+ "compound_unshared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+ (WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_write"
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME \
+ "compound_shared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC \
+ WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
/* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_write"
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME \
+ "compound_unshared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+ (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_write"
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
-#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME \
+ "compound_shared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC \
+ WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
/* Defines for the one-chunk filtered dataset read test */
-#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read"
-#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2
-#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
-#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS
-#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS
+#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read"
+#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS \
+ (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS \
+ (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS
/* Defines for the unshared filtered chunks read test */
#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_read"
@@ -214,21 +242,25 @@ typedef struct {
#define READ_SHARED_FILTERED_CHUNKS_NCOLS (READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the filtered chunks read test where a process has no selection */
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read"
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read"
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS \
+ (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS \
+ (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
/* Defines for the filtered chunks read test where no process has a selection */
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read"
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
-#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read"
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS \
+ (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS \
+ (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered chunks read test with a point selection */
#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_read"
@@ -239,21 +271,26 @@ typedef struct {
#define READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
/* Defines for the filtered dataset interleaved read test */
-#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read"
-#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2
-#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size)
-#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
-#define INTERLEAVED_READ_FILTERED_DATASET_NROWS (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
-#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read"
+#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size)
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NROWS \
+ (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS \
+ (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
/* Defines for the 3D unshared filtered dataset separate page read test */
-#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \
+ "3D_unshared_filtered_chunks_separate_pages_read"
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
-#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
-#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS \
+ (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS \
+ (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
/* Defines for the 3D unshared filtered dataset same page read test */
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_read"
@@ -261,8 +298,10 @@ typedef struct {
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
-#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
-#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS \
+ (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS \
+ (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
/* Defines for the 3d shared filtered dataset read test */
#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_read"
@@ -274,40 +313,48 @@ typedef struct {
#define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
/* Defines for the compound datatype filtered dataset no conversion read test with unshared chunks */
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_read"
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME \
+ "compound_unshared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+ (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset no conversion read test with shared chunks */
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_read"
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME \
+ "compound_shared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC \
+ READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
/* Defines for the compound datatype filtered dataset type conversion read test with unshared chunks */
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_read"
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME \
+ "compound_unshared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+ (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
/* Defines for the compound datatype filtered dataset type conversion read test with shared chunks */
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_read"
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
-#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME \
+ "compound_shared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC \
+ READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
/* Defines for the write file serially/read in parallel test */
#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
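All of the READ_* macros above follow one convention: per-chunk extents come from DIM0_SCALE_FACTOR/DIM1_SCALE_FACTOR or mpi_size, and the full dataset extents are the chunk extents multiplied by mpi_size, so every rank maps onto a whole number of chunks. A minimal sketch of how dimensions built that way feed into a chunked dataset creation; the names and scale-factor values below are illustrative stand-ins, not taken from t_filters_parallel.h:

    #include <hdf5.h>

    /* Illustrative only: mirrors the NROWS = CH_NROWS * mpi_size convention above. */
    static hid_t
    create_scaled_chunked_dset(hid_t file_id, int mpi_size)
    {
        hsize_t chunk_dims[2]   = {2, 4}; /* stand-ins for the DIM*_SCALE_FACTOR values */
        hsize_t dataset_dims[2] = {chunk_dims[0] * (hsize_t)mpi_size, chunk_dims[1] * (hsize_t)mpi_size};
        hid_t   space_id        = H5Screate_simple(2, dataset_dims, NULL);
        hid_t   dcpl_id         = H5Pcreate(H5P_DATASET_CREATE);
        hid_t   dset_id;

        H5Pset_chunk(dcpl_id, 2, chunk_dims); /* each rank then owns whole chunks of the dataset */
        dset_id = H5Dcreate2(file_id, "scaled_chunks", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id,
                             H5P_DEFAULT);

        H5Pclose(dcpl_id);
        H5Sclose(space_id);
        return dset_id;
    }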
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 0e40fe4..e2157ba 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -21,26 +21,23 @@
#include "testphdf5.h"
-int nerrors = 0; /* errors count */
+int nerrors = 0; /* errors count */
-const char *FILENAME[] = {
- "after_mpi_fin",
- NULL
-};
+const char *FILENAME[] = {"after_mpi_fin", NULL};
int
-main (int argc, char **argv)
+main(int argc, char **argv)
{
- int mpi_size, mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
/* Initialize and finalize MPI */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(MAINPROCESS)
- TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
+ if (MAINPROCESS)
+ TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
MPI_Finalize();
@@ -50,9 +47,9 @@ main (int argc, char **argv)
and create a file serially */
H5open();
- if(mpi_rank == 0) {
- char filename[1024];
- hid_t file_id;
+ if (mpi_rank == 0) {
+ char filename[1024];
+ hid_t file_id;
h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof filename);
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -63,12 +60,12 @@ main (int argc, char **argv)
H5close();
- if(MAINPROCESS) {
- if(0 == nerrors)
+ if (MAINPROCESS) {
+ if (0 == nerrors)
PASSED();
else
- H5_FAILED()
+ H5_FAILED()
}
- return (nerrors!=0);
+ return (nerrors != 0);
}
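The pattern the reformatted t_init_term.c exercises, using serial HDF5 only after MPI_Finalize() has been called, reduces to the sketch below; the filename and error handling are simplified stand-ins for the h5_fixname()/VRFY/TESTING machinery in the real test:

    #include <hdf5.h>
    #include <mpi.h>

    int
    main(int argc, char **argv)
    {
        int mpi_rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
        MPI_Finalize(); /* no MPI calls are made past this point */

        H5open(); /* serial HDF5 should still be usable */
        if (mpi_rank == 0) {
            hid_t file_id = H5Fcreate("after_mpi_fin.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

            if (file_id >= 0)
                H5Fclose(file_id);
        }
        H5close();

        return 0;
    }

As in the test itself, the point is that H5open(), H5Fcreate() with a default (serial) file access property list, and H5close() are only invoked after MPI has been finalized.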
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index db0d059..a4bc26d 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -15,13 +15,12 @@
#include "H5Dprivate.h"
#include "H5private.h"
-#define DIM 2
-#define SIZE 32
-#define NDATASET 4
+#define DIM 2
+#define SIZE 32
+#define NDATASET 4
#define GROUP_DEPTH 128
enum obj_type { is_group, is_dset };
-
static int get_size(void);
static void write_dataset(hid_t, hid_t, hid_t);
static int read_dataset(hid_t, hid_t, hid_t);
@@ -33,7 +32,6 @@ static int read_attribute(hid_t, int, int);
static int check_value(DATATYPE *, DATATYPE *, int);
static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
-
/*
* The size value computed by this function is used extensively in
* configuring tests for the current number of processes.
@@ -54,10 +52,11 @@ get_size(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- if(mpi_size > size ) {
- if((mpi_size % 2) == 0 ) {
+ if (mpi_size > size) {
+ if ((mpi_size % 2) == 0) {
size = mpi_size;
- } else {
+ }
+ else {
size = mpi_size + 1;
}
}
@@ -65,7 +64,7 @@ get_size(void)
VRFY((mpi_size <= size), "mpi_size <= size");
VRFY(((size % 2) == 0), "size isn't even");
- return(size);
+ return (size);
} /* get_size() */
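As a worked example of the rounding in get_size(), assuming size starts at the SIZE constant (32) as in the unchanged part of the function: 24 ranks leave size at 32, 48 ranks raise it to 48, and 49 ranks round it up to the even value 50, which is exactly what the two VRFY checks (mpi_size <= size, size even) then assert.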
@@ -73,14 +72,15 @@ get_size(void)
* Example of using PHDF5 to create a zero sized dataset.
*
*/
-void zero_dim_dset(void)
+void
+zero_dim_dset(void)
{
- int mpi_size, mpi_rank;
- const char *filename;
- hid_t fid, plist, dcpl, dsid, sid;
- hsize_t dim, chunk_dim;
- herr_t ret;
- int data[1];
+ int mpi_size, mpi_rank;
+ const char *filename;
+ hid_t fid, plist, dcpl, dsid, sid;
+ hsize_t dim, chunk_dim;
+ herr_t ret;
+ int data[1];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -88,37 +88,37 @@ void zero_dim_dset(void)
filename = GetTestParameters();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((plist>=0), "create_faccess_plist succeeded");
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- VRFY((fid>=0), "H5Fcreate succeeded");
+ VRFY((fid >= 0), "H5Fcreate succeeded");
ret = H5Pclose(plist);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl>=0), "failed H5Pcreate");
+ VRFY((dcpl >= 0), "failed H5Pcreate");
/* Set 1 chunk size */
chunk_dim = 1;
- ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
- VRFY((ret>=0), "failed H5Pset_chunk");
+ ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
+ VRFY((ret >= 0), "failed H5Pset_chunk");
/* Create 1D dataspace with 0 dim size */
dim = 0;
sid = H5Screate_simple(1, &dim, NULL);
- VRFY((sid>=0), "failed H5Screate_simple");
+ VRFY((sid >= 0), "failed H5Screate_simple");
/* Create chunked dataset */
dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dsid>=0), "failed H5Dcreate2");
+ VRFY((dsid >= 0), "failed H5Dcreate2");
/* write 0 elements from dataset */
ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
- VRFY((ret>=0), "failed H5Dwrite");
+ VRFY((ret >= 0), "failed H5Dwrite");
/* Read 0 elements from dataset */
ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
- VRFY((ret>=0), "failed H5Dread");
+ VRFY((ret >= 0), "failed H5Dread");
H5Pclose(dcpl);
H5Dclose(dsid);
@@ -136,24 +136,25 @@ void zero_dim_dset(void)
*
* JRM - 8/11/04
*/
-void multiple_dset_write(void)
+void
+multiple_dset_write(void)
{
- int i, j, n, mpi_size, mpi_rank, size;
- hid_t iof, plist, dataset, memspace, filespace;
- hid_t dcpl; /* Dataset creation property list */
- hsize_t chunk_origin [DIM];
- hsize_t chunk_dims [DIM], file_dims [DIM];
- hsize_t count[DIM]={1,1};
- double *outme = NULL;
- double fill=1.0; /* Fill value */
- char dname [100];
- herr_t ret;
- const H5Ptest_param_t *pt;
- char *filename;
- int ndatasets;
-
- pt = GetTestParameters();
- filename = pt->name;
+ int i, j, n, mpi_size, mpi_rank, size;
+ hid_t iof, plist, dataset, memspace, filespace;
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM];
+ hsize_t count[DIM] = {1, 1};
+ double * outme = NULL;
+ double fill = 1.0; /* Fill value */
+ char dname[100];
+ herr_t ret;
+ const H5Ptest_param_t *pt;
+ char * filename;
+ int ndatasets;
+
+ pt = GetTestParameters();
+ filename = pt->name;
ndatasets = pt->count;
size = get_size();
@@ -166,45 +167,45 @@ void multiple_dset_write(void)
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- VRFY((plist>=0), "create_faccess_plist succeeded");
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
- VRFY((iof>=0), "H5Fcreate succeeded");
+ VRFY((iof >= 0), "H5Fcreate succeeded");
ret = H5Pclose(plist);
- VRFY((ret>=0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
/* decide the hyperslab according to process number. */
get_slab(chunk_origin, chunk_dims, count, file_dims, size);
- memspace = H5Screate_simple(DIM, chunk_dims, NULL);
+ memspace = H5Screate_simple(DIM, chunk_dims, NULL);
filespace = H5Screate_simple(DIM, file_dims, NULL);
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
- VRFY((ret>=0), "mdata hyperslab selection");
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mdata hyperslab selection");
/* Create a dataset creation property list */
dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl>=0), "dataset creation property list succeeded");
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
- VRFY((ret>=0), "set fill-value succeeded");
+ VRFY((ret >= 0), "set fill-value succeeded");
- for(n = 0; n < ndatasets; n++) {
- HDsprintf(dname, "dataset %d", n);
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), dname);
+ for (n = 0; n < ndatasets; n++) {
+ HDsprintf(dname, "dataset %d", n);
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), dname);
- /* calculate data to write */
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- outme [(i * size) + j] = n*1000 + mpi_rank;
+ /* calculate data to write */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
- H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
+ H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
- H5Dclose(dataset);
+ H5Dclose(dataset);
#ifdef BARRIER_CHECKS
- if(!((n+1) % 10)) {
- HDprintf("created %d datasets\n", n+1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ if (!((n + 1) % 10)) {
+ HDprintf("created %d datasets\n", n + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
#endif /* BARRIER_CHECKS */
}
@@ -216,7 +217,6 @@ void multiple_dset_write(void)
HDfree(outme);
}
-
/* Example of using PHDF5 to create, write, and read compact dataset.
*
* Changes: Updated function to use a dynamically calculated size,
@@ -225,15 +225,16 @@ void multiple_dset_write(void)
*
* JRM - 8/11/04
*/
-void compact_dataset(void)
+void
+compact_dataset(void)
{
- int i, j, mpi_size, mpi_rank, size, err_num=0;
- hid_t iof, plist, dcpl, dxpl, dataset, filespace;
- hsize_t file_dims [DIM];
- double *outme;
- double *inme;
- char dname[]="dataset";
- herr_t ret;
+ int i, j, mpi_size, mpi_rank, size, err_num = 0;
+ hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+ hsize_t file_dims[DIM];
+ double * outme;
+ double * inme;
+ char dname[] = "dataset";
+ herr_t ret;
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
hbool_t prop_value;
@@ -241,7 +242,7 @@ void compact_dataset(void)
size = get_size();
- for(i = 0; i < DIM; i++ )
+ for (i = 0; i < DIM; i++)
file_dims[i] = (hsize_t)size;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -257,14 +258,14 @@ void compact_dataset(void)
VRFY((mpi_size <= size), "mpi_size <= size");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
/* Define data space */
filespace = H5Screate_simple(DIM, file_dims, NULL);
/* Create a compact dataset */
dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl>=0), "dataset creation property list succeeded");
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
ret = H5Pset_layout(dcpl, H5D_COMPACT);
VRFY((dcpl >= 0), "set property list for compact dataset");
ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
@@ -278,16 +279,15 @@ void compact_dataset(void)
VRFY((dxpl >= 0), "");
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Recalculate data to write. Each process writes the same data. */
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- outme[(i * size) + j] =(i + j) * 1000;
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000;
ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -300,7 +300,7 @@ void compact_dataset(void)
/* Open the file and dataset, read and compare the data. */
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
VRFY((iof >= 0), "H5Fopen succeeded");
/* set up the collective transfer properties list */
@@ -308,9 +308,9 @@ void compact_dataset(void)
VRFY((dxpl >= 0), "");
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
@@ -318,8 +318,8 @@ void compact_dataset(void)
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
VRFY((ret >= 0), "H5Pinsert2() succeeded");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
@@ -328,17 +328,19 @@ void compact_dataset(void)
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
prop_value = FALSE;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "H5Pget succeeded");
- VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),"rank 0 Bcast optimization was performed for a compact dataset");
+ VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),
+ "rank 0 Bcast optimization was performed for a compact dataset");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Verify data value */
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- if(!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j,
+ outme[(i * size) + j], inme[(i * size) + j]);
H5Pclose(plist);
H5Pclose(dxpl);
@@ -360,16 +362,17 @@ void compact_dataset(void)
*
* JRM - 8/24/04
*/
-void null_dataset(void)
+void
+null_dataset(void)
{
- int mpi_size, mpi_rank;
- hid_t iof, plist, dxpl, dataset, attr, sid;
- unsigned uval=2; /* Buffer for writing to dataset */
- int val=1; /* Buffer for writing to attribute */
- hssize_t nelem;
- char dname[]="dataset";
- char attr_name[]="attribute";
- herr_t ret;
+ int mpi_size, mpi_rank;
+ hid_t iof, plist, dxpl, dataset, attr, sid;
+ unsigned uval = 2; /* Buffer for writing to dataset */
+ int val = 1; /* Buffer for writing to attribute */
+ hssize_t nelem;
+ char dname[] = "dataset";
+ char attr_name[] = "attribute";
+ herr_t ret;
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -378,7 +381,7 @@ void null_dataset(void)
filename = GetTestParameters();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
/* Define data space */
sid = H5Screate(H5S_NULL);
@@ -396,12 +399,11 @@ void null_dataset(void)
VRFY((dxpl >= 0), "");
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Write "nothing" to the dataset(with type conversion) */
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -422,7 +424,7 @@ void null_dataset(void)
/* Open the file and dataset, read and compare the data. */
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
VRFY((iof >= 0), "H5Fopen succeeded");
/* set up the collective transfer properties list */
@@ -430,27 +432,27 @@ void null_dataset(void)
VRFY((dxpl >= 0), "");
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2 succeeded");
/* Try reading from the dataset(make certain our buffer is unmodified) */
ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval);
- VRFY((ret>=0), "H5Dread");
- VRFY((uval==2), "H5Dread");
+ VRFY((ret >= 0), "H5Dread");
+ VRFY((uval == 2), "H5Dread");
/* Open the attribute for the dataset */
attr = H5Aopen(dataset, attr_name, H5P_DEFAULT);
VRFY((attr >= 0), "H5Aopen");
- /* Try reading from the attribute(make certain our buffer is unmodified) */ ret = H5Aread(attr, H5T_NATIVE_INT, &val);
- VRFY((ret>=0), "H5Aread");
- VRFY((val==1), "H5Aread");
+ /* Try reading from the attribute(make certain our buffer is unmodified) */ ret =
+ H5Aread(attr, H5T_NATIVE_INT, &val);
+ VRFY((ret >= 0), "H5Aread");
+ VRFY((val == 1), "H5Aread");
H5Pclose(plist);
H5Pclose(dxpl);
@@ -472,17 +474,18 @@ void null_dataset(void)
*
* JRM - 8/11/04
*/
-void big_dataset(void)
+void
+big_dataset(void)
{
- int mpi_size, mpi_rank; /* MPI info */
- hid_t iof, /* File ID */
- fapl, /* File access property list ID */
- dataset, /* Dataset ID */
- filespace; /* Dataset's dataspace ID */
- hsize_t file_dims [4]; /* Dimensions of dataspace */
- char dname[]="dataset"; /* Name of dataset */
- MPI_Offset file_size; /* Size of file on disk */
- herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank; /* MPI info */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dataset, /* Dataset ID */
+ filespace; /* Dataset's dataspace ID */
+ hsize_t file_dims[4]; /* Dimensions of dataspace */
+ char dname[] = "dataset"; /* Name of dataset */
+ MPI_Offset file_size; /* Size of file on disk */
+ herr_t ret; /* Generic return value */
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -503,11 +506,11 @@ void big_dataset(void)
VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 2GB dataspace */
- file_dims[0]= 2;
- file_dims[1]= 1024;
- file_dims[2]= 1024;
- file_dims[3]= 1024;
- filespace = H5Screate_simple(4, file_dims, NULL);
+ file_dims[0] = 2;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -532,11 +535,11 @@ void big_dataset(void)
VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 4GB dataspace */
- file_dims[0]= 4;
- file_dims[1]= 1024;
- file_dims[2]= 1024;
- file_dims[3]= 1024;
- filespace = H5Screate_simple(4, file_dims, NULL);
+ file_dims[0] = 4;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -561,11 +564,11 @@ void big_dataset(void)
VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 8GB dataspace */
- file_dims[0]= 8;
- file_dims[1]= 1024;
- file_dims[2]= 1024;
- file_dims[3]= 1024;
- filespace = H5Screate_simple(4, file_dims, NULL);
+ file_dims[0] = 8;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -602,25 +605,26 @@ void big_dataset(void)
*
* JRM - 8/11/04
*/
-void dataset_fillvalue(void)
+void
+dataset_fillvalue(void)
{
- int mpi_size, mpi_rank; /* MPI info */
- int err_num; /* Number of errors */
- hid_t iof, /* File ID */
- fapl, /* File access property list ID */
- dxpl, /* Data transfer property list ID */
- dataset, /* Dataset ID */
- memspace, /* Memory dataspace ID */
- filespace; /* Dataset's dataspace ID */
- char dname[]="dataset"; /* Name of dataset */
+ int mpi_size, mpi_rank; /* MPI info */
+ int err_num; /* Number of errors */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dxpl, /* Data transfer property list ID */
+ dataset, /* Dataset ID */
+ memspace, /* Memory dataspace ID */
+ filespace; /* Dataset's dataspace ID */
+ char dname[] = "dataset"; /* Name of dataset */
hsize_t dset_dims[4] = {0, 6, 7, 8};
hsize_t req_start[4] = {0, 0, 0, 0};
hsize_t req_count[4] = {1, 6, 7, 8};
- hsize_t dset_size; /* Dataset size */
- int *rdata, *wdata; /* Buffers for data to read and write */
- int *twdata, *trdata; /* Temporary pointer into buffer */
- int acc, i, ii, j, k, l; /* Local index variables */
- herr_t ret; /* Generic return value */
+ hsize_t dset_size; /* Dataset size */
+ int * rdata, *wdata; /* Buffers for data to read and write */
+ int * twdata, *trdata; /* Temporary pointer into buffer */
+ int acc, i, ii, j, k, l; /* Local index variables */
+ herr_t ret; /* Generic return value */
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
hbool_t prop_value;
@@ -633,13 +637,13 @@ void dataset_fillvalue(void)
/* Set the dataset dimension to be one row more than number of processes */
/* and calculate the actual dataset size. */
- dset_dims[0]=(hsize_t)(mpi_size+1);
- dset_size=dset_dims[0]*dset_dims[1]*dset_dims[2]*dset_dims[3];
+ dset_dims[0] = (hsize_t)(mpi_size + 1);
+ dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3];
/* Allocate space for the buffers */
- rdata=HDmalloc((size_t)(dset_size*sizeof(int)));
+ rdata = HDmalloc((size_t)(dset_size * sizeof(int)));
VRFY((rdata != NULL), "HDcalloc succeeded for read buffer");
- wdata=HDmalloc((size_t)(dset_size*sizeof(int)));
+ wdata = HDmalloc((size_t)(dset_size * sizeof(int)));
VRFY((wdata != NULL), "HDmalloc succeeded for write buffer");
fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -669,53 +673,54 @@ void dataset_fillvalue(void)
VRFY((dxpl >= 0), "H5Pcreate succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((ret >= 0),"testing property list inserted succeeded");
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "testing property list inserted succeeded");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- for(ii = 0; ii < 2; ii++) {
+ for (ii = 0; ii < 2; ii++) {
- if(ii == 0)
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- else
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
- /* Read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), "testing property list get succeeded");
- if(ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
- else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* Verify all data read are the fill value 0 */
- trdata = rdata;
- err_num = 0;
- for(i = 0; i < (int)dset_dims[0]; i++)
- for(j = 0; j < (int)dset_dims[1]; j++)
- for(k = 0; k < (int)dset_dims[2]; k++)
- for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
- if(err_num) {
- HDprintf("%d errors found in check_value\n", err_num);
- nerrors++;
- }
+ /* Verify all data read are the fill value 0 */
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,
+ j, k, l, *trdata);
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num) {
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
}
/* Barrier to ensure all processes have completed the above test. */
@@ -725,26 +730,25 @@ void dataset_fillvalue(void)
* Each process writes 1 row of data. Thus last row is not written.
*/
/* Create hyperslabs in memory and file dataspaces */
- req_start[0]=(hsize_t)mpi_rank;
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
+ req_start[0] = (hsize_t)mpi_rank;
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Fill write buffer with some values */
- twdata=wdata;
- for(i=0, acc=0; i<(int)dset_dims[0]; i++)
- for(j=0; j<(int)dset_dims[1]; j++)
- for(k=0; k<(int)dset_dims[2]; k++)
- for(l=0; l<(int)dset_dims[3]; l++)
+ twdata = wdata;
+ for (i = 0, acc = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++)
*twdata++ = acc++;
/* Collectively write a hyperslab of data to the dataset */
@@ -759,60 +763,62 @@ void dataset_fillvalue(void)
*/
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), " H5Pset succeeded");
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), " H5Pset succeeded");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- for(ii = 0; ii < 2; ii++) {
+ for (ii = 0; ii < 2; ii++) {
- if(ii == 0)
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- else
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
- /* Read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
- ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
- VRFY((ret >= 0), "testing property list get succeeded");
- if(ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
- else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* Verify correct data read */
- twdata=wdata;
- trdata=rdata;
- err_num=0;
- for(i=0; i<(int)dset_dims[0]; i++)
- for(j=0; j<(int)dset_dims[1]; j++)
- for(k=0; k<(int)dset_dims[2]; k++)
- for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
- if(i<mpi_size) {
- if(*twdata != *trdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
- } /* end if */
- else {
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
- } /* end else */
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
- if(err_num){
- HDprintf("%d errors found in check_value\n", err_num);
- nerrors++;
- }
+ /* Verify correct data read */
+ twdata = wdata;
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if (i < mpi_size) {
+ if (*twdata != *trdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n",
+ i, j, k, l, *twdata, *trdata);
+ } /* end if */
+ else {
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ i, j, k, l, *trdata);
+ } /* end else */
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num) {
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
}
/* Close all file objects */
@@ -841,7 +847,8 @@ void dataset_fillvalue(void)
}
/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
-void collective_group_write_independent_group_read(void)
+void
+collective_group_write_independent_group_read(void)
{
collective_group_write();
independent_group_read();
@@ -856,38 +863,39 @@ void collective_group_write_independent_group_read(void)
*
* JRM - 8/16/04
*/
-void collective_group_write(void)
+void
+collective_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int i, j, m;
- char gname[64], dname[32];
- hid_t fid, gid, did, plist, dcpl, memspace, filespace;
- DATATYPE *outme = NULL;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
- herr_t ret1, ret2;
+ int mpi_rank, mpi_size, size;
+ int i, j, m;
+ char gname[64], dname[32];
+ hid_t fid, gid, did, plist, dcpl, memspace, filespace;
+ DATATYPE * outme = NULL;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
+ herr_t ret1, ret2;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char * filename;
+ int ngroups;
- pt = GetTestParameters();
+ pt = GetTestParameters();
filename = pt->name;
- ngroups = pt->count;
+ ngroups = pt->count;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
size = get_size();
- chunk_size[0] =(hsize_t)(size / 2);
- chunk_size[1] =(hsize_t)(size / 2);
+ chunk_size[0] = (hsize_t)(size / 2);
+ chunk_size[1] = (hsize_t)(size / 2);
outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
H5Pclose(plist);
/* decide the hyperslab according to process number. */
@@ -896,24 +904,22 @@ void collective_group_write(void)
/* select hyperslab in memory and file spaces. These two operations are
* identical since the datasets are the same. */
memspace = H5Screate_simple(DIM, file_dims, NULL);
- ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
- chunk_dims, count, chunk_dims);
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
- chunk_dims, count, chunk_dims);
- VRFY((memspace>=0), "memspace");
- VRFY((filespace>=0), "filespace");
- VRFY((ret1>=0), "mgroup memspace selection");
- VRFY((ret2>=0), "mgroup filespace selection");
+ ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((memspace >= 0), "memspace");
+ VRFY((filespace >= 0), "filespace");
+ VRFY((ret1 >= 0), "mgroup memspace selection");
+ VRFY((ret2 >= 0), "mgroup filespace selection");
dcpl = H5Pcreate(H5P_DATASET_CREATE);
ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
- VRFY((dcpl>=0), "dataset creation property");
- VRFY((ret1>=0), "set chunk for dataset creation property");
+ VRFY((dcpl >= 0), "dataset creation property");
+ VRFY((ret1 >= 0), "set chunk for dataset creation property");
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
- for(m = 0; m < ngroups; m++) {
+ for (m = 0; m < ngroups; m++) {
HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -922,9 +928,9 @@ void collective_group_write(void)
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- outme[(i * size) + j] =(i + j) * 1000 + mpi_rank;
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
@@ -932,8 +938,8 @@ void collective_group_write(void)
H5Gclose(gid);
#ifdef BARRIER_CHECKS
- if(!((m+1) % 10)) {
- HDprintf("created %d groups\n", m+1);
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
MPI_Barrier(MPI_COMM_WORLD);
}
#endif /* BARRIER_CHECKS */
@@ -950,17 +956,18 @@ void collective_group_write(void)
/* Let two sets of processes open and read different groups and chunked
* datasets independently.
*/
-void independent_group_read(void)
+void
+independent_group_read(void)
{
- int mpi_rank, m;
- hid_t plist, fid;
+ int mpi_rank, m;
+ hid_t plist, fid;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char * filename;
+ int ngroups;
- pt = GetTestParameters();
+ pt = GetTestParameters();
filename = pt->name;
- ngroups = pt->count;
+ ngroups = pt->count;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -973,11 +980,12 @@ void independent_group_read(void)
/* open groups and read datasets. Odd number processes read even number
* groups from the end; even number processes read odd number groups
* from the beginning. */
- if(mpi_rank%2==0) {
- for(m=ngroups-1; m==0; m-=2)
+ if (mpi_rank % 2 == 0) {
+ for (m = ngroups - 1; m == 0; m -= 2)
group_dataset_read(fid, mpi_rank, m);
- } else {
- for(m=0; m<ngroups; m+=2)
+ }
+ else {
+ for (m = 0; m < ngroups; m += 2)
group_dataset_read(fid, mpi_rank, m);
}
@@ -999,18 +1007,18 @@ void independent_group_read(void)
static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
{
- int ret, i, j, size;
- char gname[64], dname[32];
- hid_t gid, did;
+ int ret, i, j, size;
+ char gname[64], dname[32];
+ hid_t gid, did;
DATATYPE *outdata = NULL;
- DATATYPE *indata = NULL;
+ DATATYPE *indata = NULL;
size = get_size();
- indata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
/* open every group under root group. */
@@ -1021,19 +1029,19 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
/* check the data. */
HDsprintf(dname, "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
- VRFY((did>0), dname);
+ VRFY((did > 0), dname);
H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
/* this is the original value */
- for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
- outdata[(i * size) + j] =(i+j)*1000 + mpi_rank;
- }
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++) {
+ outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
+ }
/* compare the original value(outdata) to the value in file(indata).*/
ret = check_value(indata, outdata, size);
- VRFY((ret==0), "check the data");
+ VRFY((ret == 0), "check the data");
H5Dclose(did);
H5Gclose(gid);
@@ -1074,22 +1082,23 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
*
* JRM - 8/16/04
*/
-void multiple_group_write(void)
+void
+multiple_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int m;
- char gname[64];
- hid_t fid, gid, plist, memspace, filespace;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- herr_t ret;
+ int mpi_rank, mpi_size, size;
+ int m;
+ char gname[64];
+ hid_t fid, gid, plist, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ herr_t ret;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char * filename;
+ int ngroups;
- pt = GetTestParameters();
+ pt = GetTestParameters();
filename = pt->name;
- ngroups = pt->count;
+ ngroups = pt->count;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1097,7 +1106,7 @@ void multiple_group_write(void)
size = get_size();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
H5Pclose(plist);
/* decide the hyperslab according to process number. */
@@ -1105,38 +1114,36 @@ void multiple_group_write(void)
/* select hyperslab in memory and file spaces. These two operations are
* identical since the datasets are the same. */
- memspace = H5Screate_simple(DIM, file_dims, NULL);
- VRFY((memspace>=0), "memspace");
- ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
- chunk_dims, count, chunk_dims);
- VRFY((ret>=0), "mgroup memspace selection");
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((memspace >= 0), "memspace");
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup memspace selection");
- filespace = H5Screate_simple(DIM, file_dims, NULL);
- VRFY((filespace>=0), "filespace");
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
- chunk_dims, count, chunk_dims);
- VRFY((ret>=0), "mgroup filespace selection");
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((filespace >= 0), "filespace");
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup filespace selection");
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
- for(m = 0; m < ngroups; m++) {
+ for (m = 0; m < ngroups; m++) {
HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* create attribute for these groups. */
- write_attribute(gid, is_group, m);
+ write_attribute(gid, is_group, m);
- if(m != 0)
- write_dataset(memspace, filespace, gid);
+ if (m != 0)
+ write_dataset(memspace, filespace, gid);
H5Gclose(gid);
#ifdef BARRIER_CHECKS
- if(!((m+1) % 10)) {
- HDprintf("created %d groups\n", m+1);
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
}
@@ -1144,14 +1151,14 @@ void multiple_group_write(void)
gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
create_group_recursive(memspace, filespace, gid, 0);
ret = H5Gclose(gid);
- VRFY((ret>=0), "H5Gclose");
+ VRFY((ret >= 0), "H5Gclose");
ret = H5Sclose(filespace);
- VRFY((ret>=0), "H5Sclose");
+ VRFY((ret >= 0), "H5Sclose");
ret = H5Sclose(memspace);
- VRFY((ret>=0), "H5Sclose");
+ VRFY((ret >= 0), "H5Sclose");
ret = H5Fclose(fid);
- VRFY((ret>=0), "H5Fclose");
+ VRFY((ret >= 0), "H5Fclose");
}
/*
@@ -1181,21 +1188,21 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
- for(n = 0; n < NDATASET; n++) {
- HDsprintf(dname, "dataset%d", n);
- did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((did > 0), dname);
+ for (n = 0; n < NDATASET; n++) {
+ HDsprintf(dname, "dataset%d", n);
+ did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((did > 0), dname);
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
outme[(i * size) + j] = n * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
- /* create attribute for these datasets.*/
- write_attribute(did, is_dset, n);
+ /* create attribute for these datasets.*/
+ write_attribute(did, is_dset, n);
- H5Dclose(did);
+ H5Dclose(did);
}
HDfree(outme);
}
@@ -1207,30 +1214,30 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
static void
create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
{
- hid_t child_gid;
- int mpi_rank;
- char gname[64];
+ hid_t child_gid;
+ int mpi_rank;
+ char gname[64];
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef BARRIER_CHECKS
- if(!((counter+1) % 10)) {
- HDprintf("created %dth child groups\n", counter+1);
+ if (!((counter + 1) % 10)) {
+ HDprintf("created %dth child groups\n", counter + 1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
- HDsprintf(gname, "%dth_child_group", counter+1);
- child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((child_gid > 0), gname);
+ HDsprintf(gname, "%dth_child_group", counter + 1);
+ child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((child_gid > 0), gname);
- /* write datasets in parallel. */
- write_dataset(memspace, filespace, gid);
+ /* write datasets in parallel. */
+ write_dataset(memspace, filespace, gid);
- if(counter < GROUP_DEPTH )
- create_group_recursive(memspace, filespace, child_gid, counter+1);
+ if (counter < GROUP_DEPTH)
+ create_group_recursive(memspace, filespace, child_gid, counter + 1);
- H5Gclose(child_gid);
+ H5Gclose(child_gid);
}
/*
@@ -1243,21 +1250,22 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
*
* JRM - 8/11/04
*/
-void multiple_group_read(void)
+void
+multiple_group_read(void)
{
- int mpi_rank, mpi_size, error_num, size;
- int m;
- char gname[64];
- hid_t plist, fid, gid, memspace, filespace;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ int mpi_rank, mpi_size, error_num, size;
+ int m;
+ char gname[64];
+ hid_t plist, fid, gid, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char * filename;
+ int ngroups;
- pt = GetTestParameters();
+ pt = GetTestParameters();
filename = pt->name;
- ngroups = pt->count;
+ ngroups = pt->count;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1265,54 +1273,51 @@ void multiple_group_read(void)
size = get_size();
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
H5Pclose(plist);
/* decide hyperslab for each process */
get_slab(chunk_origin, chunk_dims, count, file_dims, size);
/* select hyperslab for memory and file space */
- memspace = H5Screate_simple(DIM, file_dims, NULL);
- H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims,
- count, chunk_dims);
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
filespace = H5Screate_simple(DIM, file_dims, NULL);
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims,
- count, chunk_dims);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
/* open every group under root group. */
- for(m=0; m<ngroups; m++) {
+ for (m = 0; m < ngroups; m++) {
HDsprintf(gname, "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- if(m != 0)
- if((error_num = read_dataset(memspace, filespace, gid))>0)
- nerrors += error_num;
+ if (m != 0)
+ if ((error_num = read_dataset(memspace, filespace, gid)) > 0)
+ nerrors += error_num;
/* check attribute.*/
error_num = 0;
- if((error_num = read_attribute(gid, is_group, m))>0 )
- nerrors += error_num;
+ if ((error_num = read_attribute(gid, is_group, m)) > 0)
+ nerrors += error_num;
H5Gclose(gid);
#ifdef BARRIER_CHECKS
- if(!((m+1)%10))
+ if (!((m + 1) % 10))
MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
}
/* open all the groups in vertical direction. */
gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
- VRFY((gid>0), "group0");
+ VRFY((gid > 0), "group0");
recursive_read_group(memspace, filespace, gid, 0);
H5Gclose(gid);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(fid);
-
}
/*
@@ -1328,7 +1333,7 @@ void multiple_group_read(void)
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
- int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
+ int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0;
char dname[32];
DATATYPE *outdata = NULL, *indata = NULL;
hid_t did;
@@ -1338,32 +1343,32 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
size = get_size();
- indata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
- for(n=0; n<NDATASET; n++) {
+ for (n = 0; n < NDATASET; n++) {
HDsprintf(dname, "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
- VRFY((did>0), dname);
+ VRFY((did > 0), dname);
H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
/* this is the original value */
- for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
- *outdata = n*1000 + mpi_rank;
- outdata++;
- }
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++) {
+ *outdata = n * 1000 + mpi_rank;
+ outdata++;
+ }
outdata -= size * size;
/* compare the original value(outdata) to the value in file(indata).*/
vrfy_errors = check_value(indata, outdata, size);
/* check attribute.*/
- if((attr_errors = read_attribute(did, is_dset, n))>0 )
+ if ((attr_errors = read_attribute(did, is_dset, n)) > 0)
vrfy_errors += attr_errors;
H5Dclose(did);
@@ -1383,23 +1388,23 @@ static void
recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
{
hid_t child_gid;
- int mpi_rank, err_num=0;
+ int mpi_rank, err_num = 0;
char gname[64];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef BARRIER_CHECKS
- if((counter+1) % 10)
+ if ((counter + 1) % 10)
MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
- if((err_num = read_dataset(memspace, filespace, gid)) )
+ if ((err_num = read_dataset(memspace, filespace, gid)))
nerrors += err_num;
- if(counter < GROUP_DEPTH ) {
- HDsprintf(gname, "%dth_child_group", counter+1);
+ if (counter < GROUP_DEPTH) {
+ HDsprintf(gname, "%dth_child_group", counter + 1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
- VRFY((child_gid>0), gname);
- recursive_read_group(memspace, filespace, child_gid, counter+1);
+ VRFY((child_gid > 0), gname);
+ recursive_read_group(memspace, filespace, child_gid, counter + 1);
H5Gclose(child_gid);
}
}
@@ -1411,23 +1416,23 @@ static void
write_attribute(hid_t obj_id, int this_type, int num)
{
hid_t sid, aid;
- hsize_t dspace_dims[1]={8};
- int i, mpi_rank, attr_data[8], dspace_rank=1;
+ hsize_t dspace_dims[1] = {8};
+ int i, mpi_rank, attr_data[8], dspace_rank = 1;
char attr_name[32];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- if(this_type == is_group) {
+ if (this_type == is_group) {
HDsprintf(attr_name, "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
- H5Awrite(aid, H5T_NATIVE_INT, &num);
+ H5Awrite(aid, H5T_NATIVE_INT, &num);
H5Aclose(aid);
H5Sclose(sid);
} /* end if */
- else if(this_type == is_dset) {
+ else if (this_type == is_dset) {
HDsprintf(attr_name, "Dataset Attribute %d", num);
- for(i=0; i<8; i++)
+ for (i = 0; i < 8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
@@ -1435,35 +1440,34 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Aclose(aid);
H5Sclose(sid);
} /* end else-if */
-
}
/* Read and verify attribute for group or dataset. */
static int
read_attribute(hid_t obj_id, int this_type, int num)
{
- hid_t aid;
- hsize_t group_block[2]={1,1}, dset_block[2]={1, 8};
- int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
- char attr_name[32];
+ hid_t aid;
+ hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8};
+ int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
+ char attr_name[32];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- if(this_type == is_group) {
+ if (this_type == is_group) {
HDsprintf(attr_name, "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- if(MAINPROCESS) {
+ if (MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, &in_num);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
}
H5Aclose(aid);
}
- else if(this_type == is_dset) {
+ else if (this_type == is_dset) {
HDsprintf(attr_name, "Dataset Attribute %d", num);
- for(i=0; i<8; i++)
+ for (i = 0; i < 8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
- if(MAINPROCESS) {
+ if (MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, in_data);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
}
@@ -1485,27 +1489,29 @@ read_attribute(hid_t obj_id, int this_type, int num)
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
{
- int mpi_rank, mpi_size, err_num=0;
- hsize_t i, j;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], count[DIM];
+ int mpi_rank, mpi_size, err_num = 0;
+ hsize_t i, j;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], count[DIM];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
get_slab(chunk_origin, chunk_dims, count, NULL, size);
- indata += chunk_origin[0]*(hsize_t)size;
- outdata += chunk_origin[0]*(hsize_t)size;
- for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++)
- for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
- if(*indata != *outdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
+ indata += chunk_origin[0] * (hsize_t)size;
+ outdata += chunk_origin[0] * (hsize_t)size;
+ for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++)
+ for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) {
+ if (*indata != *outdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata,
+ *indata);
}
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
HDprintf("[more errors ...]\n");
- if(err_num)
+ if (err_num)
HDprintf("%d errors found in check_value\n", err_num);
return err_num;
}
@@ -1520,25 +1526,24 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
*/
static void
-get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
- hsize_t file_dims[], int size)
+get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size)
{
int mpi_rank, mpi_size;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- if(chunk_origin != NULL) {
- chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size/mpi_size);
+ if (chunk_origin != NULL) {
+ chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size);
chunk_origin[1] = 0;
}
- if(chunk_dims != NULL) {
- chunk_dims[0] = (hsize_t)(size/mpi_size);
- chunk_dims[1] = (hsize_t)size;
+ if (chunk_dims != NULL) {
+ chunk_dims[0] = (hsize_t)(size / mpi_size);
+ chunk_dims[1] = (hsize_t)size;
}
- if(file_dims != NULL)
+ if (file_dims != NULL)
file_dims[0] = file_dims[1] = (hsize_t)size;
- if(count != NULL)
+ if (count != NULL)
count[0] = count[1] = 1;
}
@@ -1565,24 +1570,24 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
#define N 4
-void io_mode_confusion(void)
+void
+io_mode_confusion(void)
{
/*
* HDF5 APIs definitions
*/
- const int rank = 1;
+ const int rank = 1;
const char *dataset_name = "IntArray";
- hid_t file_id, dset_id; /* file and dataset identifiers */
- hid_t filespace, memspace; /* file and memory dataspace */
- /* identifiers */
- hsize_t dimsf[1]; /* dataset dimensions */
- int data[N] = {1}; /* pointer to data buffer to write */
- hsize_t coord[N] = {0L,1L,2L,3L};
- hid_t plist_id; /* property list identifier */
- herr_t status;
-
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace */
+ /* identifiers */
+ hsize_t dimsf[1]; /* dataset dimensions */
+ int data[N] = {1}; /* pointer to data buffer to write */
+ hsize_t coord[N] = {0L, 1L, 2L, 3L};
+ hid_t plist_id; /* property list identifier */
+ herr_t status;
/*
* MPI variables
@@ -1590,18 +1595,16 @@ void io_mode_confusion(void)
int mpi_size, mpi_rank;
-
/*
* test bed related variables
*/
- const char * fcn_name = "io_mode_confusion";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
-
+ const char * fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t *pt;
+ char * filename;
- pt = GetTestParameters();
+ pt = GetTestParameters();
filename = pt->name;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -1611,183 +1614,154 @@ void io_mode_confusion(void)
* Set up file access property list with parallel I/O access
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id != -1), "H5Pcreate() failed");
status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((status >= 0 ), "H5Pset_fapl_mpio() failed");
-
+ VRFY((status >= 0), "H5Pset_fapl_mpio() failed");
/*
* Create a new file collectively and release property list identifier.
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
- VRFY((file_id >= 0 ), "H5Fcreate() failed");
+ VRFY((file_id >= 0), "H5Fcreate() failed");
status = H5Pclose(plist_id);
- VRFY((status >= 0 ), "H5Pclose() failed");
-
+ VRFY((status >= 0), "H5Pclose() failed");
/*
* Create the dataspace for the dataset.
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name);
- dimsf[0] = N;
+ dimsf[0] = N;
filespace = H5Screate_simple(rank, dimsf, NULL);
- VRFY((filespace >= 0 ), "H5Screate_simple() failed.");
-
+ VRFY((filespace >= 0), "H5Screate_simple() failed.");
/*
* Create the dataset with default properties and close filespace.
*/
- if(verbose )
- HDfprintf(stdout,
- "%0d:%s: Creating the dataset, and closing filespace.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name);
- dset_id = H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dset_id >= 0 ), "H5Dcreate2() failed");
+ dset_id =
+ H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() failed");
status = H5Sclose(filespace);
- VRFY((status >= 0 ), "H5Sclose() failed");
-
+ VRFY((status >= 0), "H5Sclose() failed");
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name);
memspace = H5Screate_simple(rank, dimsf, NULL);
- VRFY((memspace >= 0 ), "H5Screate_simple() failed.");
-
+ VRFY((memspace >= 0), "H5Screate_simple() failed.");
- if(mpi_rank == 0 ) {
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n",
- mpi_rank, fcn_name);
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name);
status = H5Sselect_all(memspace);
- VRFY((status >= 0 ), "H5Sselect_all() failed");
- } else {
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n",
- mpi_rank, fcn_name);
+ VRFY((status >= 0), "H5Sselect_all() failed");
+ }
+ else {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name);
status = H5Sselect_none(memspace);
- VRFY((status >= 0 ), "H5Sselect_none() failed");
+ VRFY((status >= 0), "H5Sselect_none() failed");
}
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
MPI_Barrier(MPI_COMM_WORLD);
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name);
filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0 ), "H5Dget_space() failed");
-
+ VRFY((filespace >= 0), "H5Dget_space() failed");
/* select all */
- if(mpi_rank == 0 ) {
- if(verbose )
- HDfprintf(stdout,
- "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n",
- mpi_rank, fcn_name);
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name);
status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord);
- VRFY((status >= 0 ), "H5Sselect_elements() failed");
- } else { /* select nothing */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n",
- mpi_rank, fcn_name);
+ VRFY((status >= 0), "H5Sselect_elements() failed");
+ }
+ else { /* select nothing */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name);
status = H5Sselect_none(filespace);
- VRFY((status >= 0 ), "H5Sselect_none() failed");
+ VRFY((status >= 0), "H5Sselect_none() failed");
}
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
MPI_Barrier(MPI_COMM_WORLD);
-
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id != -1 ), "H5Pcreate() failed");
-
+ VRFY((plist_id != -1), "H5Pcreate() failed");
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name);
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
- VRFY((status >= 0 ), "H5Pset_dxpl_mpio() failed");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ VRFY((status >= 0), "H5Pset_dxpl_mpio() failed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ VRFY((status >= 0), "set independent IO collectively succeeded");
}
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name);
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n",
- mpi_rank, fcn_name);
-
- status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace,
- plist_id, data);
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n",
- mpi_rank, fcn_name, status);
- VRFY((status >= 0 ), "H5Dwrite() failed");
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status);
+ VRFY((status >= 0), "H5Dwrite() failed");
/*
* Close/release resources.
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name);
status = H5Dclose(dset_id);
- VRFY((status >= 0 ), "H5Dclose() failed");
+ VRFY((status >= 0), "H5Dclose() failed");
status = H5Sclose(filespace);
- VRFY((status >= 0 ), "H5Dclose() failed");
+ VRFY((status >= 0), "H5Dclose() failed");
status = H5Sclose(memspace);
- VRFY((status >= 0 ), "H5Sclose() failed");
+ VRFY((status >= 0), "H5Sclose() failed");
status = H5Pclose(plist_id);
- VRFY((status >= 0 ), "H5Pclose() failed");
+ VRFY((status >= 0), "H5Pclose() failed");
status = H5Fclose(file_id);
- VRFY((status >= 0 ), "H5Fclose() failed");
-
+ VRFY((status >= 0), "H5Fclose() failed");
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
return;
@@ -1841,62 +1815,46 @@ void io_mode_confusion(void)
* the correctness of the writer. AKC -- 2010/10/27
*/
-#define NUM_DATA_SETS 4
-#define LOCAL_DATA_SIZE 4
-#define LARGE_ATTR_SIZE 256
+#define NUM_DATA_SETS 4
+#define LOCAL_DATA_SIZE 4
+#define LARGE_ATTR_SIZE 256
/* Since all even and odd processes are split into writer and reader comm
* respectively, process 0 and 1 in COMM_WORLD become the root process of
* the writer and reader comm respectively.
*/
-#define Writer_Root 0
-#define Reader_Root 1
-#define Reader_wait(mpi_err, xsteps) \
- mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
-#define Reader_result(mpi_err, xsteps_done) \
+#define Writer_Root 0
+#define Reader_Root 1
+#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
+#define Reader_result(mpi_err, xsteps_done) \
mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD)
-#define Reader_check(mpi_err, xsteps, xsteps_done) \
- { Reader_wait(mpi_err, xsteps); \
- Reader_result(mpi_err, xsteps_done);}
+#define Reader_check(mpi_err, xsteps, xsteps_done) \
+ { \
+ Reader_wait(mpi_err, xsteps); \
+ Reader_result(mpi_err, xsteps_done); \
+ }
/* object names used by both rr_obj_hdr_flush_confusion and
* rr_obj_hdr_flush_confusion_reader.
*/
-const char * dataset_name[NUM_DATA_SETS] =
- {
- "dataset_0",
- "dataset_1",
- "dataset_2",
- "dataset_3"
- };
-const char * att_name[NUM_DATA_SETS] =
- {
- "attribute_0",
- "attribute_1",
- "attribute_2",
- "attribute_3"
- };
-const char * lg_att_name[NUM_DATA_SETS] =
- {
- "large_attribute_0",
- "large_attribute_1",
- "large_attribute_2",
- "large_attribute_3"
- };
-
-void rr_obj_hdr_flush_confusion(void)
+const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"};
+const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"};
+const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2",
+ "large_attribute_3"};
+
+void
+rr_obj_hdr_flush_confusion(void)
{
/* MPI variables */
/* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- int is_reader; /* 1 for reader process; 0 for writer process. */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int is_reader; /* 1 for reader process; 0 for writer process. */
MPI_Comm comm;
-
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion";
- const hbool_t verbose = FALSE;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion";
+ const hbool_t verbose = FALSE;
/* Create two new private communicators from MPI_COMM_WORLD.
* Even and odd ranked processes go to comm_writers and comm_readers
@@ -1908,8 +1866,8 @@ void rr_obj_hdr_flush_confusion(void)
HDassert(mpi_size > 2);
is_reader = mpi_rank % 2;
- mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
/* The reader proocesses branches off to do reading
* while the writer processes continues to do writing
@@ -1919,32 +1877,33 @@ void rr_obj_hdr_flush_confusion(void)
* step. When all steps are done, they inform readers to end.
*/
if (is_reader)
- rr_obj_hdr_flush_confusion_reader(comm);
+ rr_obj_hdr_flush_confusion_reader(comm);
else
- rr_obj_hdr_flush_confusion_writer(comm);
+ rr_obj_hdr_flush_confusion_writer(comm);
MPI_Comm_free(&comm);
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
return;
} /* rr_obj_hdr_flush_confusion() */
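
As the comments above describe, rr_obj_hdr_flush_confusion splits MPI_COMM_WORLD by rank parity into a writer communicator (even ranks) and a reader communicator (odd ranks), then coordinates the two sides by broadcasting step counts through world ranks 0 and 1. A minimal standalone sketch of that split-and-handshake pattern follows; the step values and the assumption of at least two ranks are illustrative, not taken from the test itself.

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        int      world_rank, is_reader, steps, steps_done;
        MPI_Comm comm;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

        /* Even ranks become writers, odd ranks readers; each side gets
         * its own private communicator (requires at least 2 ranks). */
        is_reader = world_rank % 2;
        MPI_Comm_split(MPI_COMM_WORLD, is_reader, world_rank, &comm);

        /* Handshake over MPI_COMM_WORLD: world rank 0 (writer root)
         * announces how many steps are ready, world rank 1 (reader root)
         * reports how many it verified. */
        steps = is_reader ? 0 : 1; /* writer side decides the value */
        MPI_Bcast(&steps, 1, MPI_INT, 0, MPI_COMM_WORLD);
        steps_done = steps;        /* a real reader would verify here */
        MPI_Bcast(&steps_done, 1, MPI_INT, 1, MPI_COMM_WORLD);

        if (world_rank == 0)
            printf("steps requested: %d, steps verified: %d\n", steps, steps_done);

        MPI_Comm_free(&comm);
        MPI_Finalize();
        return 0;
    }
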
-void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
+void
+rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
{
- int i;
- int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t att_id[NUM_DATA_SETS];
- hid_t att_space[NUM_DATA_SETS];
- hid_t lg_att_id[NUM_DATA_SETS];
- hid_t lg_att_space[NUM_DATA_SETS];
- hid_t disk_space[NUM_DATA_SETS];
- hid_t mem_space[NUM_DATA_SETS];
- hid_t dataset[NUM_DATA_SETS];
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t att_id[NUM_DATA_SETS];
+ hid_t att_space[NUM_DATA_SETS];
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_space[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
hsize_t att_size[1];
hsize_t lg_att_size[1];
hsize_t disk_count[1];
@@ -1953,10 +1912,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
hsize_t mem_count[1];
hsize_t mem_size[1];
hsize_t mem_start[1];
- herr_t err;
- double data[LOCAL_DATA_SIZE];
- double att[LOCAL_DATA_SIZE];
- double lg_att[LARGE_ATTR_SIZE];
+ herr_t err;
+ double data[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
/* MPI variables */
/* world communication size and rank */
@@ -1967,20 +1926,20 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
int mpi_rank;
int mrc; /* mpi error code */
/* steps to verify and have been verified */
- int steps = 0;
+ int steps = 0;
int steps_done = 0;
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
const H5Ptest_param_t *pt;
- char *filename;
+ char * filename;
/*
* setup test bed related variables:
*/
- pt = (const H5Ptest_param_t *)GetTestParameters();
+ pt = (const H5Ptest_param_t *)GetTestParameters();
filename = pt->name;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
@@ -1992,50 +1951,45 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* Set up file access property list with parallel I/O access
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
-
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
/*
* Create a new file collectively and release property list identifier.
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
- mpi_rank, fcn_name, filename);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename);
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((file_id >= 0 ), "H5Fcreate() failed");
+ VRFY((file_id >= 0), "H5Fcreate() failed");
err = H5Pclose(fapl_id);
- VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
-
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
/*
* Step 1: create the data sets and write data.
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name);
disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
disk_space[i] = H5Screate_simple(1, disk_size, NULL);
- VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
+ VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
- dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
- disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
}
@@ -2044,45 +1998,41 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* setup data transfer property list
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
- "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
/*
* write data to the data sets
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name);
disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
data[j] = (double)(mpi_rank + 1);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
mem_space[i] = H5Screate_simple(1, mem_size, NULL);
VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data);
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data);
VRFY((err >= 0), "H5Dwrite(1) failed.\n");
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
data[j] *= 10.0;
}
@@ -2090,10 +2040,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* close the data spaces
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Sclose(disk_space[i]);
VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
err = H5Sclose(mem_space[i]);
@@ -2106,9 +2056,8 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* flush the metadata cache
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(1) failed.\n");
@@ -2120,23 +2069,23 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* Step 2: write attributes to each dataset
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name);
att_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
att[j] = (double)(j + 1);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
att_space[i] = H5Screate_simple(1, att_size, NULL);
VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
- att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
- att_space[i], H5P_DEFAULT, H5P_DEFAULT);
+ att_id[i] =
+ H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
VRFY((err >= 0), "H5Awrite(1) failed.\n");
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
att[j] /= 10.0;
}
}
@@ -2145,11 +2094,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* close attribute IDs and spaces
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Sclose(att_space[i]);
VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
err = H5Aclose(att_id[i]);
@@ -2162,9 +2110,8 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* flush the metadata cache again
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(2) failed.\n");
@@ -2176,25 +2123,24 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* Step 3: write large attributes to each dataset
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name);
lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
lg_att[j] = (double)(j + 1);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
- lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
- lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT);
+ lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT,
+ H5P_DEFAULT);
VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
VRFY((err >= 0), "H5Awrite(2) failed.\n");
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
lg_att[j] /= 10.0;
}
}
@@ -2211,9 +2157,8 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* code is going to change a lot in the near future.
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2225,18 +2170,17 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* Step 4: write different large attributes to each dataset
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name);
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
lg_att[j] = (double)(j + 2);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
VRFY((err >= 0), "H5Awrite(2) failed.\n");
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
lg_att[j] /= 10.0;
}
}
@@ -2246,9 +2190,8 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/*
* flush the metadata cache again
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2262,11 +2205,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* close large attribute IDs and spaces
*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Sclose(lg_att_space[i]);
VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n");
@@ -2274,15 +2216,14 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n");
}
-
/*
* close the data sets
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Dclose(dataset[i]);
VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
}
@@ -2291,88 +2232,87 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
* close the data transfer property list.
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
err = H5Pclose(dxpl_id);
VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
-
/*
* Close file.
*/
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name);
err = H5Fclose(file_id);
- VRFY((err >= 0 ), "H5Fclose(1) failed");
+ VRFY((err >= 0), "H5Fclose(1) failed");
/* End of Step 5: Close all objects and the file */
/* Tell the reader to check the file up to steps. */
steps++;
Reader_check(mrc, steps, steps_done);
-
/* All done. Inform reader to end. */
- steps=0;
+ steps = 0;
Reader_check(mrc, steps, steps_done);
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
return;
} /* rr_obj_hdr_flush_confusion_writer() */
-void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
+void
+rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
{
- int i;
- int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t lg_att_id[NUM_DATA_SETS];
- hid_t lg_att_type[NUM_DATA_SETS];
- hid_t disk_space[NUM_DATA_SETS];
- hid_t mem_space[NUM_DATA_SETS];
- hid_t dataset[NUM_DATA_SETS];
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_type[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
hsize_t disk_count[1];
hsize_t disk_start[1];
hsize_t mem_count[1];
hsize_t mem_size[1];
hsize_t mem_start[1];
- herr_t err;
- htri_t tri_err;
- double data[LOCAL_DATA_SIZE];
- double data_read[LOCAL_DATA_SIZE];
- double att[LOCAL_DATA_SIZE];
- double att_read[LOCAL_DATA_SIZE];
- double lg_att[LARGE_ATTR_SIZE];
- double lg_att_read[LARGE_ATTR_SIZE];
+ herr_t err;
+ htri_t tri_err;
+ double data[LOCAL_DATA_SIZE];
+ double data_read[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double att_read[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
+ double lg_att_read[LARGE_ATTR_SIZE];
/* MPI variables */
/* world communication size and rank */
- int mpi_world_size;
- int mpi_world_rank;
+ int mpi_world_size;
+ int mpi_world_rank;
/* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- int steps = -1; /* How far (steps) to verify the file */
- int steps_done = -1; /* How far (steps) have been verified */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int steps = -1; /* How far (steps) to verify the file */
+ int steps_done = -1; /* How far (steps) have been verified */
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
const H5Ptest_param_t *pt;
- char *filename;
+ char * filename;
/*
* setup test bed related variables:
*/
- pt = (const H5Ptest_param_t *)GetTestParameters();
+ pt = (const H5Ptest_param_t *)GetTestParameters();
filename = pt->name;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
@@ -2382,53 +2322,50 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* Repeatedly re-open the file and verify its contents until it is */
/* told to end (when steps=0). */
- while (steps_done != 0){
+ while (steps_done != 0) {
Reader_wait(mrc, steps);
VRFY((mrc >= 0), "Reader_wait failed");
steps_done = 0;
- if (steps > 0 ){
+ if (steps > 0) {
/*
- * Set up file access property list with parallel I/O access
- */
+ * Set up file access property list with parallel I/O access
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
/*
- * Create a new file collectively and release property list identifier.
- */
+ * Create a new file collectively and release property list identifier.
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
- mpi_rank, fcn_name, filename);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename);
file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
- VRFY((file_id >= 0 ), "H5Fopen() failed");
+ VRFY((file_id >= 0), "H5Fopen() failed");
err = H5Pclose(fapl_id);
- VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
#if 1
- if (steps >= 1){
+ if (steps >= 1) {
/*=====================================================*
- * Step 1: open the data sets and read data.
- *=====================================================*/
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
- if(verbose )
- HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
dataset[i] = -1;
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
disk_space[i] = H5Dget_space(dataset[i]);
@@ -2436,23 +2373,22 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
}
/*
- * setup data transfer property list
- */
+ * setup data transfer property list
+ */
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
- "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
/*
- * read data from the data sets
- */
+ * read data from the data sets
+ */
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
@@ -2464,44 +2400,43 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
mem_start[0] = (hsize_t)(0);
/* set up expected data for verification */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
data[j] = (double)(mpi_rank + 1);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count,
+ NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
mem_space[i] = H5Screate_simple(1, mem_size, NULL);
VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data_read);
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id,
+ data_read);
VRFY((err >= 0), "H5Dread(1) failed.\n");
/* compare read data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])){
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) {
HDfprintf(stdout,
- "%0d:%s: Reading datasets value failed in "
- "Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
nerrors++;
}
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
data[j] *= 10.0;
}
/*
- * close the data spaces
- */
+ * close the data spaces
+ */
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
err = H5Sclose(disk_space[i]);
VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
err = H5Sclose(mem_space[i]);
@@ -2514,18 +2449,18 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
#if 1
/*=====================================================*
- * Step 2: reading attributes from each dataset
- *=====================================================*/
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
- if (steps >= 2){
- if(verbose )
+ if (steps >= 2) {
+ if (verbose)
HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
att[j] = (double)(j + 1);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
hid_t att_id, att_type;
att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
@@ -2534,10 +2469,9 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
VRFY((att_type >= 0), "H5Aget_type failed.\n");
tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank,
+ fcn_name, i);
nerrors++;
}
else {
@@ -2545,14 +2479,15 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
VRFY((err >= 0), "H5Aread failed.\n");
/* compare read attribute data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])){
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) {
HDfprintf(stdout,
- "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position "
+ "%d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
nerrors++;
}
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
att[j] /= 10.0;
}
}
@@ -2564,47 +2499,46 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* End of Step 2: reading attributes from each dataset */
#endif
-
#if 1
/*=====================================================*
- * Step 3 or 4: read large attributes from each dataset.
- * Step 4 has different attribute value from step 3.
- *=====================================================*/
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
- if (steps >= 3){
- if(verbose )
+ if (steps >= 3) {
+ if (verbose)
HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
- lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2);
}
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
lg_att_type[i] = H5Aget_type(lg_att_id[i]);
VRFY((err >= 0), "H5Aget_type failed.\n");
tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
nerrors++;
}
- else{
+ else {
/* should verify large attribute size before H5Aread */
err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
VRFY((err >= 0), "H5Aread failed.\n");
/* compare read attribute data with expected data */
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
- if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])){
+ for (j = 0; j < LARGE_ATTR_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) {
HDfprintf(stdout,
- "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at "
+ "position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
nerrors++;
}
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
lg_att[j] /= 10.0;
}
@@ -2615,61 +2549,58 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
VRFY((err >= 0), "H5Aclose failed.\n");
}
/* Both step 3 and 4 use this same read checking code. */
- steps_done = (steps==3) ? 3 : 4;
+ steps_done = (steps == 3) ? 3 : 4;
}
/* End of Step 3 or 4: read large attributes from each dataset */
#endif
-
/*=====================================================*
- * Step 5: read all objects from the file
- *=====================================================*/
- if (steps>=5){
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps >= 5) {
/* nothing extra to verify. The file is closed normally. */
/* Just increment steps_done */
steps_done++;
}
/*
- * Close the data sets
- */
+ * Close the data sets
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
- mpi_rank, fcn_name);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name);
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- if ( dataset[i] >= 0 ) {
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ if (dataset[i] >= 0) {
err = H5Dclose(dataset[i]);
VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
}
}
/*
- * close the data transfer property list.
- */
+ * close the data transfer property list.
+ */
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
err = H5Pclose(dxpl_id);
VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
/*
- * Close the file
- */
- if(verbose)
- HDfprintf(stdout, "%0d:%s: closing file again.\n",
- mpi_rank, fcn_name);
+ * Close the file
+ */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name);
err = H5Fclose(file_id);
- VRFY((err >= 0 ), "H5Fclose(1) failed");
+ VRFY((err >= 0), "H5Fclose(1) failed");
} /* else if (steps_done==0) */
Reader_result(mrc, steps_done);
} /* end while(1) */
- if(verbose )
+ if (verbose)
HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
return;
@@ -2684,27 +2615,27 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
#undef Writer_Root
#undef Reader_Root
-
/*
* Test creating a chunked dataset in parallel in a file with an alignment set
* and an alignment threshold large enough to avoid aligning the chunks but
* small enough that the raw data aggregator will be aligned if it is treated as
* an object that must be aligned by the library
*/
-#define CHUNK_SIZE 72
-#define NCHUNKS 32
-#define AGGR_SIZE 2048
+#define CHUNK_SIZE 72
+#define NCHUNKS 32
+#define AGGR_SIZE 2048
#define EXTRA_ALIGN 100
- void chunk_align_bug_1(void)
- {
- int mpi_rank;
- hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
- hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
- h5_stat_size_t file_size;
- hsize_t align;
- herr_t ret;
- const char *filename;
+void
+chunk_align_bug_1(void)
+{
+ int mpi_rank;
+ hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
+ hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
+ h5_stat_size_t file_size;
+ hsize_t align;
+ herr_t ret;
+ const char * filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -2769,8 +2700,6 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
return;
} /* end chunk_align_bug_1() */
-
/*=============================================================================
* End of t_mdset.c
*===========================================================================*/
-
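
The chunk_align_bug_1 test above depends on H5Pset_alignment: any file object at least threshold bytes in size is placed at a multiple of alignment, so a threshold above the chunk size leaves the chunks unaligned while larger allocations are still aligned. A minimal sketch of setting such an alignment on a parallel file access property list follows; the file name and the threshold/alignment values are illustrative assumptions, not the ones the test uses.

    #include <mpi.h>
    #include "hdf5.h"

    int main(int argc, char **argv)
    {
        hid_t   fapl, file;
        hsize_t threshold = 1024; /* only objects of 1 KiB or more ...  */
        hsize_t alignment = 4096; /* ... are placed on 4 KiB boundaries */

        MPI_Init(&argc, &argv);

        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* needs parallel HDF5 */
        H5Pset_alignment(fapl, threshold, alignment);

        /* Allocations smaller than the threshold keep their natural
         * addresses; anything at or above it is aligned on allocation. */
        file = H5Fcreate("align_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        H5Pclose(fapl);
        H5Fclose(file);
        MPI_Finalize();
        return 0;
    }
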
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 0719ca6..fe73ba0 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -28,33 +28,34 @@
#include "testpar.h"
/* FILENAME and filenames must have the same number of names */
-const char *FILENAME[2] = { "MPItest", NULL };
-char filenames[2][200];
-int nerrors = 0;
-hid_t fapl; /* file access property list */
+const char *FILENAME[2] = {"MPItest", NULL};
+char filenames[2][200];
+int nerrors = 0;
+hid_t fapl; /* file access property list */
/* protocols */
static int errors_sum(int nerrs);
-#define MPIO_TEST_WRITE_SIZE 1024*1024 /* 1 MB */
-
-static int test_mpio_overlap_writes(char *filename) {
- int mpi_size, mpi_rank;
- MPI_Comm comm;
- MPI_Info info = MPI_INFO_NULL;
- int color, mrc;
- MPI_File fh;
- int i;
- int vrfyerrs, nerrs;
+#define MPIO_TEST_WRITE_SIZE 1024 * 1024 /* 1 MB */
+
+static int
+test_mpio_overlap_writes(char *filename)
+{
+ int mpi_size, mpi_rank;
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int color, mrc;
+ MPI_File fh;
+ int i;
+ int vrfyerrs, nerrs;
unsigned char buf[4093]; /* use some prime number for size */
- int bufsize = sizeof(buf);
- MPI_Offset stride;
- MPI_Offset mpi_off;
- MPI_Status mpi_stat;
+ int bufsize = sizeof(buf);
+ MPI_Offset stride;
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
if (VERBOSE_MED)
- HDprintf("MPIO independent overlapping writes test on file %s\n",
- filename);
+ HDprintf("MPIO independent overlapping writes test on file %s\n", filename);
nerrs = 0;
/* set up MPI parameters */
@@ -71,16 +72,15 @@ static int test_mpio_overlap_writes(char *filename) {
/* splits processes 0 to n-2 into one comm. and the last one into another */
color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1);
- mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
VRFY((mrc == MPI_SUCCESS), "Comm_split succeeded");
if (color == 0) {
/* First n-1 processes (color==0) open a file and write it */
- mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
- info, &fh);
+ mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
VRFY((mrc == MPI_SUCCESS), "");
- stride = 1;
+ stride = 1;
mpi_off = mpi_rank * stride;
while (mpi_off < MPIO_TEST_WRITE_SIZE) {
/* make sure the write does not exceed the TEST_WRITE_SIZE */
@@ -89,9 +89,8 @@ static int test_mpio_overlap_writes(char *filename) {
/* set data to some trivial pattern for easy verification */
for (i = 0; i < stride; i++)
- buf[i] = (unsigned char) (mpi_off + i);
- mrc = MPI_File_write_at(fh, mpi_off, buf, (int) stride, MPI_BYTE,
- &mpi_stat);
+ buf[i] = (unsigned char)(mpi_off + i);
+ mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, &mpi_stat);
VRFY((mrc == MPI_SUCCESS), "");
/* move the offset pointer to last byte written by all processes */
@@ -114,10 +113,11 @@ static int test_mpio_overlap_writes(char *filename) {
/* sync with the other waiting processes */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync after writes");
- } else {
+ }
+ else {
/* last process waits till writes are done,
- * then opens file to verify data.
- */
+ * then opens file to verify data.
+ */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync after writes");
@@ -129,18 +129,15 @@ static int test_mpio_overlap_writes(char *filename) {
/* make sure it does not read beyond end of data */
if (mpi_off + stride > MPIO_TEST_WRITE_SIZE)
stride = MPIO_TEST_WRITE_SIZE - mpi_off;
- mrc = MPI_File_read_at(fh, mpi_off, buf, (int) stride, MPI_BYTE,
- &mpi_stat);
+ mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, &mpi_stat);
VRFY((mrc == MPI_SUCCESS), "");
vrfyerrs = 0;
for (i = 0; i < stride; i++) {
unsigned char expected;
- expected = (unsigned char) (mpi_off + i);
- if ((expected != buf[i])
- && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
- HDprintf(
- "proc %d: found data error at [%ld], expect %u, got %u\n",
- mpi_rank, (long) (mpi_off + i), expected, buf[i]);
+ expected = (unsigned char)(mpi_off + i);
+ if ((expected != buf[i]) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
+ HDprintf("proc %d: found data error at [%ld], expect %u, got %u\n", mpi_rank,
+ (long)(mpi_off + i), expected, buf[i]);
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
@@ -157,18 +154,18 @@ static int test_mpio_overlap_writes(char *filename) {
}
/*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
return (nerrs);
}
-#define MB 1048576 /* 1024*1024 == 2**20 */
-#define GB 1073741824 /* 1024**3 == 2**30 */
-#define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */
-#define FOUR_GB_LESS1 4294967295L /* 2**32 - 1 */
+#define MB 1048576 /* 1024*1024 == 2**20 */
+#define GB 1073741824 /* 1024**3 == 2**30 */
+#define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */
+#define FOUR_GB_LESS1 4294967295L /* 2**32 - 1 */
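
The TWO_GB_LESS1 and FOUR_GB_LESS1 constants are only usable when MPI_Offset is wide enough to hold them, which is why test_mpio_gb_file below first probes the size and signedness of MPI_Offset. A standalone sketch of that probe is below; the probe expression is the one the test uses, while the printed messages are illustrative.

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        MPI_Offset off       = 0;
        int        is_signed = ((MPI_Offset)(off - 1)) < 0; /* wraps if unsigned */
        int        nbytes    = (int)sizeof(MPI_Offset);
        int        rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        if (rank == 0) {
            printf("MPI_Offset is a %s %d-byte type\n", is_signed ? "signed" : "unsigned", nbytes);

            /* A 4-byte signed MPI_Offset tops out at 2 GB - 1, so offsets
             * beyond that are only exercised when the type is wider. */
            if (nbytes > 4 || !is_signed) {
                MPI_Offset two_gb = (MPI_Offset)2 * 1024 * 1024 * 1024;
                printf("2 GB offset computed without overflow: %lld\n", (long long)two_gb);
            }
        }

        MPI_Finalize();
        return 0;
    }
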
/*
* Verify that MPI_Offset exceeding 2**31 can be computed correctly.
* Print any failure as information only, not as an error so that this
@@ -180,23 +177,25 @@ static int test_mpio_overlap_writes(char *filename) {
* Then reads the file back in by reverse order, that is process 0
* reads the data of process n-1 and vice versa.
*/
-static int test_mpio_gb_file(char *filename) {
- int mpi_size, mpi_rank;
- MPI_Info info = MPI_INFO_NULL;
- int mrc;
- MPI_File fh;
- int i, j, n;
- int vrfyerrs;
- int writerrs; /* write errors */
- int nerrs;
- int ntimes; /* how many times */
- char *buf = NULL;
- char expected;
+static int
+test_mpio_gb_file(char *filename)
+{
+ int mpi_size, mpi_rank;
+ MPI_Info info = MPI_INFO_NULL;
+ int mrc;
+ MPI_File fh;
+ int i, j, n;
+ int vrfyerrs;
+ int writerrs; /* write errors */
+ int nerrs;
+ int ntimes; /* how many times */
+ char * buf = NULL;
+ char expected;
MPI_Offset size;
MPI_Offset mpi_off;
MPI_Offset mpi_off_old;
MPI_Status mpi_stat;
- int is_signed, sizeof_mpi_offset;
+ int is_signed, sizeof_mpi_offset;
nerrs = 0;
/* set up MPI parameters */
@@ -207,73 +206,74 @@ static int test_mpio_gb_file(char *filename) {
HDprintf("MPI_Offset range test\n");
/* figure out the signness and sizeof MPI_Offset */
- mpi_off = 0;
- is_signed = ((MPI_Offset)(mpi_off - 1)) < 0;
- sizeof_mpi_offset = (int) (sizeof(MPI_Offset));
+ mpi_off = 0;
+ is_signed = ((MPI_Offset)(mpi_off - 1)) < 0;
+ sizeof_mpi_offset = (int)(sizeof(MPI_Offset));
/*
- * Verify the sizeof MPI_Offset and correctness of handling multiple GB
- * sizes.
- */
+ * Verify the sizeof MPI_Offset and correctness of handling multiple GB
+ * sizes.
+ */
if (MAINPROCESS) { /* only process 0 needs to check it*/
- HDprintf("MPI_Offset is %s %d bytes integeral type\n",
- is_signed ? "signed" : "unsigned", (int) sizeof(MPI_Offset));
+ HDprintf("MPI_Offset is %s %d bytes integeral type\n", is_signed ? "signed" : "unsigned",
+ (int)sizeof(MPI_Offset));
if (sizeof_mpi_offset <= 4 && is_signed) {
HDprintf("Skipped 2GB range test "
- "because MPI_Offset cannot support it\n");
- } else {
+ "because MPI_Offset cannot support it\n");
+ }
+ else {
/* verify correctness of assigning 2GB sizes */
- mpi_off = 2 * 1024 * (MPI_Offset) MB;
+ mpi_off = 2 * 1024 * (MPI_Offset)MB;
INFO((mpi_off > 0), "2GB OFFSET assignment no overflow");
- INFO((mpi_off-1)==TWO_GB_LESS1, "2GB OFFSET assignment succeed");
+ INFO((mpi_off - 1) == TWO_GB_LESS1, "2GB OFFSET assignment succeed");
/* verify correctness of increasing from below 2 GB to above 2GB */
mpi_off = TWO_GB_LESS1;
for (i = 0; i < 3; i++) {
mpi_off_old = mpi_off;
- mpi_off = mpi_off + 1;
+ mpi_off = mpi_off + 1;
/* no overflow */
INFO((mpi_off > 0), "2GB OFFSET increment no overflow");
/* correct inc. */
- INFO((mpi_off - 1) == mpi_off_old,
- "2GB OFFSET increment succeed");
+ INFO((mpi_off - 1) == mpi_off_old, "2GB OFFSET increment succeed");
}
}
if (sizeof_mpi_offset <= 4) {
HDprintf("Skipped 4GB range test "
- "because MPI_Offset cannot support it\n");
- } else {
+ "because MPI_Offset cannot support it\n");
+ }
+ else {
/* verify correctness of assigning 4GB sizes */
- mpi_off = 4 * 1024 * (MPI_Offset) MB;
+ mpi_off = 4 * 1024 * (MPI_Offset)MB;
INFO((mpi_off > 0), "4GB OFFSET assignment no overflow");
- INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");
+ INFO((mpi_off - 1) == FOUR_GB_LESS1, "4GB OFFSET assignment succeed");
/* verify correctness of increasing from below 4 GB to above 4 GB */
mpi_off = FOUR_GB_LESS1;
for (i = 0; i < 3; i++) {
mpi_off_old = mpi_off;
- mpi_off = mpi_off + 1;
+ mpi_off = mpi_off + 1;
/* no overflow */
INFO((mpi_off > 0), "4GB OFFSET increment no overflow");
/* correct inc. */
- INFO((mpi_off - 1) == mpi_off_old,
- "4GB OFFSET increment succeed");
+ INFO((mpi_off - 1) == mpi_off_old, "4GB OFFSET increment succeed");
}
}
}
/*
- * Verify if we can write to a file of multiple GB sizes.
- */
+ * Verify if we can write to a file of multiple GB sizes.
+ */
if (VERBOSE_MED)
HDprintf("MPIO GB file test %s\n", filename);
if (sizeof_mpi_offset <= 4) {
HDprintf("Skipped GB file range test "
- "because MPI_Offset cannot support it\n");
- } else {
- buf = (char *) HDmalloc(MB);
+ "because MPI_Offset cannot support it\n");
+ }
+ else {
+ buf = (char *)HDmalloc(MB);
VRFY((buf != NULL), "malloc succeed");
/* open a new file. Remove it first in case it exists. */
@@ -282,34 +282,29 @@ static int test_mpio_gb_file(char *filename) {
MPI_File_delete(filename, MPI_INFO_NULL);
MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */
- mrc = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN");
HDprintf("MPIO GB file write test %s\n", filename);
/* instead of writing every bytes of the file, we will just write
- * some data around the 2 and 4 GB boundaries. That should cover
- * potential integer overflow and filesystem size limits.
- */
+ * some data around the 2 and 4 GB boundaries. That should cover
+ * potential integer overflow and filesystem size limits.
+ */
writerrs = 0;
for (n = 2; n <= 4; n += 2) {
ntimes = GB / MB * n / mpi_size + 1;
for (i = ntimes - 2; i <= ntimes; i++) {
- mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset) MB;
+ mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset)MB;
if (VERBOSE_MED)
- HDfprintf(stdout,
- "proc %d: write to mpi_off=%016llx, %lld\n",
- mpi_rank, mpi_off, mpi_off);
+ HDfprintf(stdout, "proc %d: write to mpi_off=%016llx, %lld\n", mpi_rank, mpi_off,
+ mpi_off);
/* set data to some trivial pattern for easy verification */
for (j = 0; j < MB; j++)
*(buf + j) = (int8_t)(i * mpi_size + mpi_rank);
if (VERBOSE_MED)
- HDfprintf(stdout,
- "proc %d: writing %d bytes at offset %lld\n",
- mpi_rank, MB, mpi_off);
- mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE,
- &mpi_stat);
+ HDfprintf(stdout, "proc %d: writing %d bytes at offset %lld\n", mpi_rank, MB, mpi_off);
+ mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
INFO((mrc == MPI_SUCCESS), "GB size file write");
if (mrc != MPI_SUCCESS)
writerrs++;
@@ -324,42 +319,34 @@ static int test_mpio_gb_file(char *filename) {
VRFY((mrc == MPI_SUCCESS), "Sync after writes");
/*
- * Verify if we can read the multiple GB file just created.
- */
+ * Verify if we can read the multiple GB file just created.
+ */
/* open it again to verify the data written */
/* but only if there was no write errors */
HDprintf("MPIO GB file read test %s\n", filename);
if (errors_sum(writerrs) > 0) {
- HDprintf("proc %d: Skip read test due to previous write errors\n",
- mpi_rank);
+ HDprintf("proc %d: Skip read test due to previous write errors\n", mpi_rank);
goto finish;
}
- mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info,
- &fh);
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
VRFY((mrc == MPI_SUCCESS), "");
/* Only read back parts of the file that have been written. */
for (n = 2; n <= 4; n += 2) {
ntimes = GB / MB * n / mpi_size + 1;
for (i = ntimes - 2; i <= ntimes; i++) {
- mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1))
- * (MPI_Offset) MB;
+ mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1)) * (MPI_Offset)MB;
if (VERBOSE_MED)
- HDfprintf(stdout,
- "proc %d: read from mpi_off=%016llx, %lld\n",
- mpi_rank, mpi_off, mpi_off);
- mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE,
- &mpi_stat);
+ HDfprintf(stdout, "proc %d: read from mpi_off=%016llx, %lld\n", mpi_rank, mpi_off,
+ mpi_off);
+ mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
INFO((mrc == MPI_SUCCESS), "GB size file read");
expected = (int8_t)(i * mpi_size + (mpi_size - mpi_rank - 1));
vrfyerrs = 0;
for (j = 0; j < MB; j++) {
- if ((*(buf + j) != expected)
- && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
- HDprintf(
- "proc %d: found data error at [%ld+%d], expect %d, got %d\n",
- mpi_rank, (long) mpi_off, j, expected,
- *(buf + j));
+ if ((*(buf + j) != expected) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
+ HDprintf("proc %d: found data error at [%ld+%d], expect %d, got %d\n", mpi_rank,
+ (long)mpi_off, j, expected, *(buf + j));
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
@@ -374,23 +361,21 @@ static int test_mpio_gb_file(char *filename) {
VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
/*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
HDprintf("Test if MPI_File_get_size works correctly with %s\n", filename);
- mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info,
- &fh);
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
VRFY((mrc == MPI_SUCCESS), "");
if (MAINPROCESS) { /* only process 0 needs to check it*/
mrc = MPI_File_get_size(fh, &size);
VRFY((mrc == MPI_SUCCESS), "");
- VRFY((size == mpi_off+MB),
- "MPI_File_get_size doesn't return correct file size.");
+ VRFY((size == mpi_off + MB), "MPI_File_get_size doesn't return correct file size.");
}
/* close file and free the communicator */
@@ -398,14 +383,15 @@ static int test_mpio_gb_file(char *filename) {
VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
/*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
}
- finish: if (buf)
+finish:
+ if (buf)
HDfree(buf);
return (nerrs);
}
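
As a stand-alone illustration of the offset arithmetic this hunk reformats, the sketch below recomputes the per-rank 1 MB write positions that straddle the 2 GB and 4 GB marks. It is a minimal example, not part of the patch; the MB and GB constants and the loop bounds simply mirror what t_mpi.c is assumed to use, and no file I/O is performed.

/* sketch: where the GB-file test would place each rank's 1 MB blocks */
#include <mpi.h>
#include <stdio.h>

#define MB (1024 * 1024)
#define GB (1024 * 1024 * 1024)

int main(int argc, char **argv)
{
    int        mpi_size, mpi_rank, n, i, ntimes;
    MPI_Offset mpi_off;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Probe 1 MB blocks just below and just above the n-GB mark (n = 2, 4)
     * instead of writing the whole file, as the test above does. */
    for (n = 2; n <= 4; n += 2) {
        ntimes = GB / MB * n / mpi_size + 1;
        for (i = ntimes - 2; i <= ntimes; i++) {
            mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset)MB;
            printf("rank %d: would write 1 MB at offset %lld\n", mpi_rank, (long long)mpi_off);
        }
    }

    MPI_Finalize();
    return 0;
}
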
@@ -427,26 +413,28 @@ static int test_mpio_gb_file(char *filename) {
* Each process writes something, then reads all data back.
*/
-#define DIMSIZE 32 /* Dimension size. */
-#define PRINTID HDprintf("Proc %d: ", mpi_rank)
-#define USENONE 0
-#define USEATOM 1 /* request atomic I/O */
-#define USEFSYNC 2 /* request file_sync */
-
-static int test_mpio_1wMr(char *filename, int special_request) {
- char hostname[128];
- int mpi_size, mpi_rank;
- MPI_File fh;
- char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
+#define DIMSIZE 32 /* Dimension size. */
+#define PRINTID HDprintf("Proc %d: ", mpi_rank)
+#define USENONE 0
+#define USEATOM 1 /* request atomic I/O */
+#define USEFSYNC 2 /* request file_sync */
+
+static int
+test_mpio_1wMr(char *filename, int special_request)
+{
+ char hostname[128];
+ int mpi_size, mpi_rank;
+ MPI_File fh;
+ char mpi_err_str[MPI_MAX_ERROR_STRING];
+ int mpi_err_strlen;
+ int mpi_err;
unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
unsigned char expect_val;
- int i, irank;
- int nerrs = 0; /* number of errors */
- int atomicity;
- MPI_Offset mpi_off;
- MPI_Status mpi_stat;
+ int i, irank;
+ int nerrs = 0; /* number of errors */
+ int atomicity;
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -460,7 +448,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
/* show the hostname so that we can tell where the processes are running */
if (VERBOSE_DEF) {
#ifdef H5_HAVE_GETHOSTNAME
- if(HDgethostname(hostname, sizeof(hostname)) < 0) {
+ if (HDgethostname(hostname, sizeof(hostname)) < 0) {
HDprintf("gethostname failed\n");
hostname[0] = '\0';
}
@@ -478,9 +466,8 @@ static int test_mpio_1wMr(char *filename, int special_request) {
MPI_File_delete(filename, MPI_INFO_NULL);
MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */
- if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
- != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL,
+ &fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
@@ -489,9 +476,9 @@ static int test_mpio_1wMr(char *filename, int special_request) {
if (special_request & USEATOM) {
/* ==================================================
- * Set atomcity to true (1). A POSIX compliant filesystem
- * should not need this.
- * ==================================================*/
+ * Set atomicity to true (1). A POSIX-compliant filesystem
+ * should not need this.
+ * ==================================================*/
if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
@@ -521,26 +508,26 @@ static int test_mpio_1wMr(char *filename, int special_request) {
}
/* ==================================================
- * Each process calculates what to write but
- * only process irank(0) writes.
- * ==================================================*/
+ * Each process calculates what to write but
+ * only process irank(0) writes.
+ * ==================================================*/
irank = 0;
for (i = 0; i < DIMSIZE; i++)
- writedata[i] = (uint8_t)(irank * DIMSIZE + i);
+ writedata[i] = (uint8_t)(irank * DIMSIZE + i);
mpi_off = irank * DIMSIZE;
/* Only one process writes */
if (mpi_rank == irank) {
if (VERBOSE_HI) {
PRINTID;
- HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off);
+ HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
}
- if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
- MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE, &mpi_stat)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, DIMSIZE, mpi_err_str);
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, DIMSIZE,
+ mpi_err_str);
return 1;
};
};
@@ -555,9 +542,9 @@ static int test_mpio_1wMr(char *filename, int special_request) {
if (special_request & USEFSYNC) {
/* ==================================================
- * Do a file sync. A POSIX compliant filesystem
- * should not need this.
- * ==================================================*/
+ * Do a file sync. A POSIX compliant filesystem
+ * should not need this.
+ * ==================================================*/
if (VERBOSE_HI)
HDprintf("Apply MPI_File_sync\n");
/* call file_sync to force the write out */
@@ -584,24 +571,22 @@ static int test_mpio_1wMr(char *filename, int special_request) {
}
/* ==================================================
- * Each process reads what process 0 wrote and verify.
- * ==================================================*/
- irank = 0;
+ * Each process reads what process 0 wrote and verify.
+ * ==================================================*/
+ irank = 0;
mpi_off = irank * DIMSIZE;
- if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
- &mpi_stat)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, DIMSIZE, mpi_err_str);
+ HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, DIMSIZE,
+ mpi_err_str);
return 1;
};
for (i = 0; i < DIMSIZE; i++) {
expect_val = (uint8_t)(irank * DIMSIZE + i);
if (readdata[i] != expect_val) {
PRINTID;
- HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
- readdata[i], expect_val);
+ HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i, readdata[i], expect_val);
nerrs++;
}
}
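
The hunks above reformat a single-writer/many-readers check that can optionally request MPI atomic mode or an explicit file sync. Condensed to its essentials, and assuming an initialized MPI program and a scratch filename (error checking omitted), the pattern being exercised looks roughly like this:

/* sketch only: rank 0 writes a small buffer, every rank reads it back */
#include <mpi.h>
#include <string.h>

#define NBYTES 32

static int one_writer_many_readers(const char *filename, int use_atomic, int use_fsync)
{
    MPI_File      fh;
    MPI_Status    st;
    unsigned char buf[NBYTES];
    int           rank, i, nerrs = 0;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);

    if (use_atomic)
        MPI_File_set_atomicity(fh, 1); /* a POSIX-compliant filesystem should not need this */

    if (rank == 0) { /* only one process writes */
        for (i = 0; i < NBYTES; i++)
            buf[i] = (unsigned char)i;
        MPI_File_write_at(fh, 0, buf, NBYTES, MPI_BYTE, &st);
    }

    if (use_fsync)
        MPI_File_sync(fh); /* force the write out before the readers proceed */
    MPI_Barrier(MPI_COMM_WORLD);

    memset(buf, 0, sizeof(buf));
    MPI_File_read_at(fh, 0, buf, NBYTES, MPI_BYTE, &st); /* every rank reads it back */
    for (i = 0; i < NBYTES; i++)
        if (buf[i] != (unsigned char)i)
            nerrs++; /* data did not make it to the readers */

    MPI_File_close(&fh);
    return nerrs;
}
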
@@ -657,41 +642,44 @@ static int test_mpio_1wMr(char *filename, int special_request) {
the following values were obtained:
1,2,0
- The problem is that the displacement of the second derived datatype(datatype2) which formed the final derived datatype(advtype)
- has been put after the basic datatype(MPI_BYTE) of datatype2. This is a bug.
+ The problem is that the displacement of the second derived datatype (datatype2), which forms the final derived
+ datatype (advtype), has been put after the basic datatype (MPI_BYTE) of datatype2. This is a bug.
2. This test will verify whether the complicated derived datatype is working on
the current platform.
- If this bug has been fixed in the previous not-working package, this test will issue a HDprintf message to tell the developer to change
- the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections.
+ If this bug has been fixed in the previously non-working package, this test will issue an HDprintf message to
+ tell the developer to change the configuration-specific file of HDF5 so that we can change our
+ configuration setting to support collective IO for irregular selections.
- If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
- we can turn off collective IO support for irregular selections.
+ If it turns out that the previously working MPI-IO package no longer works, this test will also issue a message
+ reporting the failure so that we can turn off collective IO support for irregular selections.
*/
-static int test_mpio_derived_dtype(char *filename) {
+static int
+test_mpio_derived_dtype(char *filename)
+{
- MPI_File fh;
- char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
- int i;
+ MPI_File fh;
+ char mpi_err_str[MPI_MAX_ERROR_STRING];
+ int mpi_err_strlen;
+ int mpi_err;
+ int i;
MPI_Datatype etype, filetype;
MPI_Datatype adv_filetype, bas_filetype[2];
MPI_Datatype filetypenew;
- MPI_Offset disp;
- MPI_Status Status;
- MPI_Aint adv_disp[2];
- MPI_Aint offsets[1];
- int blocklens[1], adv_blocklens[2];
- int count, outcount;
- int retcode;
+ MPI_Offset disp;
+ MPI_Status Status;
+ MPI_Aint adv_disp[2];
+ MPI_Aint offsets[1];
+ int blocklens[1], adv_blocklens[2];
+ int count, outcount;
+ int retcode;
int mpi_rank, mpi_size;
- char buf[3], outbuf[3] = { 0 };
+ char buf[3], outbuf[3] = {0};
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
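
To make the bug description above easier to follow, here is a compact sketch of the datatype construction the test performs: two one-byte hindexed types selecting file bytes 0 and 1, combined into a struct type with displacements 0 and 1. The helper name is illustrative only; a buggy MPI-IO layer mis-places the second member's displacement, which is what the surrounding test detects by writing three bytes through the view and reading them back raw.

/* sketch only: build the "advanced" filetype used by test_mpio_derived_dtype() */
#include <mpi.h>

static int build_adv_filetype(MPI_Datatype *adv_filetype /* out */)
{
    MPI_Datatype filetype, filetypenew, bas_filetype[2];
    int          blocklens[1] = {1}, adv_blocklens[2] = {1, 1};
    MPI_Aint     offsets[1], adv_disp[2] = {0, 1};

    offsets[0] = 0; /* first selected byte of the file view */
    MPI_Type_create_hindexed(1, blocklens, offsets, MPI_BYTE, &filetype);
    MPI_Type_commit(&filetype);

    offsets[0] = 1; /* second selected byte of the file view */
    MPI_Type_create_hindexed(1, blocklens, offsets, MPI_BYTE, &filetypenew);
    MPI_Type_commit(&filetypenew);

    bas_filetype[0] = filetype;
    bas_filetype[1] = filetypenew;
    MPI_Type_create_struct(2, adv_blocklens, adv_disp, bas_filetype, adv_filetype);
    MPI_Type_commit(adv_filetype);

    MPI_Type_free(&filetype); /* the struct type keeps its own references */
    MPI_Type_free(&filetypenew);
    return 0;
}
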
@@ -699,23 +687,21 @@ static int test_mpio_derived_dtype(char *filename) {
for (i = 0; i < 3; i++)
buf[i] = (char)(i + 1);
- if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
- != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL,
+ &fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
- disp = 0;
+ disp = 0;
etype = MPI_BYTE;
- count = 1;
+ count = 1;
blocklens[0] = 1;
- offsets[0] = 0;
+ offsets[0] = 0;
- if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
- &filetype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
@@ -727,11 +713,11 @@ static int test_mpio_derived_dtype(char *filename) {
return 1;
}
- count = 1;
+ count = 1;
blocklens[0] = 1;
- offsets[0] = 1;
- if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
- &filetypenew)) != MPI_SUCCESS) {
+ offsets[0] = 1;
+ if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetypenew)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
@@ -743,16 +729,16 @@ static int test_mpio_derived_dtype(char *filename) {
return 1;
}
- outcount = 2;
+ outcount = 2;
adv_blocklens[0] = 1;
adv_blocklens[1] = 1;
- adv_disp[0] = 0;
- adv_disp[1] = 1;
- bas_filetype[0] = filetype;
- bas_filetype[1] = filetypenew;
+ adv_disp[0] = 0;
+ adv_disp[1] = 1;
+ bas_filetype[0] = filetype;
+ bas_filetype[1] = filetypenew;
- if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp,
- bas_filetype, &adv_filetype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp, bas_filetype, &adv_filetype)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
return 1;
@@ -763,15 +749,14 @@ static int test_mpio_derived_dtype(char *filename) {
return 1;
}
- if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native",
- MPI_INFO_NULL)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native", MPI_INFO_NULL)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
- if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status))
- != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_write failed (%s)\n", mpi_err_str);
return 1;
@@ -783,21 +768,19 @@ static int test_mpio_derived_dtype(char *filename) {
return 1;
}
- if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY,
- MPI_INFO_NULL, &fh)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
- if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native",
- MPI_INFO_NULL)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
- if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status))
- != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_read failed (%s)\n", mpi_err_str);
return 1;
@@ -805,7 +788,8 @@ static int test_mpio_derived_dtype(char *filename) {
if (outbuf[2] == 2) {
retcode = 0;
- } else {
+ }
+ else {
/* if(mpi_rank == 0) {
HDprintf("complicated derived datatype is NOT working at this platform\n");
HDprintf("go back to hdf5/config and find the corresponding\n");
@@ -824,8 +808,7 @@ static int test_mpio_derived_dtype(char *filename) {
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
if (retcode == -1) {
if (mpi_rank == 0) {
- HDprintf(
- "Complicated derived datatype is NOT working at this platform\n");
+ HDprintf("Complicated derived datatype is NOT working at this platform\n");
HDprintf(" Please report to help@hdfgroup.org about this problem.\n");
}
retcode = 1;
@@ -851,29 +834,33 @@ static int test_mpio_derived_dtype(char *filename) {
2. This test will fail with the MPI-IO package that doesn't support this. For example,
mpich 1.2.6.
- If this bug has been fixed in the previous not-working package, this test will issue a HDprintf message to tell the developer to change
- the configuration specific file of HDF5 so that we can change our configurationsetting to support special collective IO; currently only special collective IO.
+ If this bug has been fixed in the previously non-working package, this test will issue an HDprintf message to
+ tell the developer to change the configuration-specific file of HDF5 so that we can change our
+ configuration setting to support special collective IO; currently only special collective IO.
- If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
- we can turn off the support for special collective IO; currently only special collective IO.
+ If it turns out that the previously working MPI-IO package no longer works, this test will also issue a message
+ reporting the failure so that we can turn off the support for special collective IO; currently
+ only special collective IO.
*/
-static int test_mpio_special_collective(char *filename) {
- int mpi_size, mpi_rank;
- MPI_File fh;
+static int
+test_mpio_special_collective(char *filename)
+{
+ int mpi_size, mpi_rank;
+ MPI_File fh;
MPI_Datatype etype, buftype, filetype;
- char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
- char writedata[2 * DIMSIZE];
- char filerep[7] = "native";
- int i;
- int count, bufcount;
- int blocklens[2];
- MPI_Aint offsets[2];
- MPI_Offset mpi_off = 0;
- MPI_Status mpi_stat;
- int retcode = 0;
+ char mpi_err_str[MPI_MAX_ERROR_STRING];
+ int mpi_err_strlen;
+ int mpi_err;
+ char writedata[2 * DIMSIZE];
+ char filerep[7] = "native";
+ int i;
+ int count, bufcount;
+ int blocklens[2];
+ MPI_Aint offsets[2];
+ MPI_Offset mpi_off = 0;
+ MPI_Status mpi_stat;
+ int retcode = 0;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
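
The comment block above describes the "special" collective case: every rank takes part in the collective write call, but only ranks 0 and 1 contribute any bytes. A minimal sketch of that calling pattern (placeholder filename, flat MPI_BYTE view, no error checking) could be:

/* sketch only: a collective write in which most ranks contribute nothing */
#include <mpi.h>

static int special_collective_write(const char *filename)
{
    MPI_File   fh;
    MPI_Status st;
    char       buf[64];
    int        rank, count, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    count = (rank == 0 || rank == 1) ? (int)sizeof(buf) : 0; /* most ranks write nothing */

    for (i = 0; i < (int)sizeof(buf); i++)
        buf[i] = (char)(rank + i);

    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);

    /* Every rank must make the collective call, even with count == 0; MPI-IO
     * packages that mishandle zero-count participants fail here. */
    MPI_File_write_at_all(fh, (MPI_Offset)rank * (MPI_Offset)sizeof(buf), buf, count, MPI_BYTE, &st);

    MPI_File_close(&fh);
    return 0;
}
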
@@ -881,22 +868,21 @@ static int test_mpio_special_collective(char *filename) {
/* create MPI data type */
etype = MPI_BYTE;
if (mpi_rank == 0 || mpi_rank == 1) {
- count = DIMSIZE;
+ count = DIMSIZE;
bufcount = 1;
} /* end if */
else {
- count = 0;
+ count = 0;
bufcount = 0;
} /* end else */
blocklens[0] = count;
- offsets[0] = mpi_rank * count;
+ offsets[0] = mpi_rank * count;
blocklens[1] = count;
- offsets[1] = (mpi_size + mpi_rank) * count;
+ offsets[1] = (mpi_size + mpi_rank) * count;
if (count != 0) {
- if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
- &filetype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
@@ -908,8 +894,7 @@ static int test_mpio_special_collective(char *filename) {
return 1;
} /* end if */
- if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
- &buftype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &buftype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
@@ -920,16 +905,15 @@ static int test_mpio_special_collective(char *filename) {
HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
- } /* end if */
+ } /* end if */
else {
filetype = MPI_BYTE;
- buftype = MPI_BYTE;
+ buftype = MPI_BYTE;
} /* end else */
/* Open a file */
- if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
- != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL,
+ &fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
@@ -937,22 +921,22 @@ static int test_mpio_special_collective(char *filename) {
/* each process writes some data */
for (i = 0; i < 2 * DIMSIZE; i++)
- writedata[i] = (char) (mpi_rank * DIMSIZE + i);
+ writedata[i] = (char)(mpi_rank * DIMSIZE + i);
/* Set the file view */
- if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep,
- MPI_INFO_NULL)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep, MPI_INFO_NULL)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
/* Collectively write into the file */
- if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount,
- buftype, &mpi_stat)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount, buftype, &mpi_stat)) !=
+ MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, bufcount, mpi_err_str);
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, bufcount,
+ mpi_err_str);
return 1;
} /* end if */
@@ -980,41 +964,44 @@ static int test_mpio_special_collective(char *filename) {
/*
* parse the command line options
*/
-static int parse_options(int argc, char **argv) {
+static int
+parse_options(int argc, char **argv)
+{
while (--argc) {
if (**(++argv) != '-') {
break;
- } else {
+ }
+ else {
switch (*(*argv + 1)) {
- case 'v':
- if (*((*argv + 1) + 1))
- ParseTestVerbosity((*argv + 1) + 1);
- else
- SetTestVerbosity(VERBO_MED);
- break;
- case 'f':
- if (--argc < 1) {
- nerrors++;
+ case 'v':
+ if (*((*argv + 1) + 1))
+ ParseTestVerbosity((*argv + 1) + 1);
+ else
+ SetTestVerbosity(VERBO_MED);
+ break;
+ case 'f':
+ if (--argc < 1) {
+ nerrors++;
+ return (1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return (1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'h': /* print help message--return with nerrors set */
return (1);
- }
- if (**(++argv) == '-') {
+ default:
nerrors++;
return (1);
- }
- paraprefix = *argv;
- break;
- case 'h': /* print help message--return with nerrors set */
- return (1);
- default:
- nerrors++;
- return (1);
}
}
} /*while*/
/* compose the test filenames */
{
- int i, n;
+ int i, n;
hid_t plist;
plist = H5Pcreate(H5P_FILE_ACCESS);
@@ -1022,8 +1009,7 @@ static int parse_options(int argc, char **argv) {
n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
for (i = 0; i < n; i++)
- if (h5_fixname(FILENAME[i], plist, filenames[i],
- sizeof(filenames[i])) == NULL) {
+ if (h5_fixname(FILENAME[i], plist, filenames[i], sizeof(filenames[i])) == NULL) {
HDprintf("h5_fixname failed\n");
nerrors++;
return (1);
@@ -1042,7 +1028,9 @@ static int parse_options(int argc, char **argv) {
/*
* Show command usage
*/
-static void usage(void) {
+static void
+usage(void)
+{
HDprintf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n");
HDprintf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n");
HDprintf("\t-f <prefix>\tfilename prefix\n");
@@ -1052,13 +1040,17 @@ static void usage(void) {
/*
* return the sum of all errors.
*/
-static int errors_sum(int nerrs) {
+static int
+errors_sum(int nerrs)
+{
int temp;
MPI_Allreduce(&nerrs, &temp, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
return (temp);
}
-int main(int argc, char **argv) {
+int
+main(int argc, char **argv)
+{
int mpi_size, mpi_rank; /* mpi variables */
int ret_code;
@@ -1067,10 +1059,10 @@ int main(int argc, char **argv) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hang in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
+ * happen during the test and the process is aborted, it will not
+ * hang in the atexit post-processing, in which it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
if (H5dont_atexit() < 0) {
HDprintf("Failed to turn off atexit processing. Continue.\n");
};
@@ -1097,8 +1089,8 @@ int main(int argc, char **argv) {
ALARM_ON;
/*=======================================
- * MPIO 1 write Many read test
- *=======================================*/
+ * MPIO 1 write Many read test
+ *=======================================*/
MPI_BANNER("MPIO 1 write Many read test...");
ret_code = test_mpio_1wMr(filenames[0], USENONE);
ret_code = errors_sum(ret_code);
@@ -1128,8 +1120,8 @@ int main(int argc, char **argv) {
}
/*=======================================
- * MPIO MPIO File size range test
- *=======================================*/
+ * MPIO File size range test
+ *=======================================*/
MPI_BANNER("MPIO File size range test...");
#ifndef H5_HAVE_WIN32_API
ret_code = test_mpio_gb_file(filenames[0]);
@@ -1139,13 +1131,13 @@ int main(int argc, char **argv) {
nerrors += ret_code;
}
#else
- if (mpi_rank==0)
+ if (mpi_rank == 0)
HDprintf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif
/*=======================================
- * MPIO independent overlapping writes
- *=======================================*/
+ * MPIO independent overlapping writes
+ *=======================================*/
MPI_BANNER("MPIO independent overlapping writes...");
ret_code = test_mpio_overlap_writes(filenames[0]);
ret_code = errors_sum(ret_code);
@@ -1155,8 +1147,8 @@ int main(int argc, char **argv) {
}
/*=======================================
- * MPIO complicated derived datatype test
- *=======================================*/
+ * MPIO complicated derived datatype test
+ *=======================================*/
MPI_BANNER("MPIO complicated derived datatype test...");
ret_code = test_mpio_derived_dtype(filenames[0]);
ret_code = errors_sum(ret_code);
@@ -1166,8 +1158,8 @@ int main(int argc, char **argv) {
}
/*=======================================
- * MPIO special collective IO test
- *=======================================*/
+ * MPIO special collective IO test
+ *=======================================*/
if (mpi_size < 4) {
MPI_BANNER("MPIO special collective io test SKIPPED.");
if (mpi_rank == 0)
@@ -1179,16 +1171,17 @@ int main(int argc, char **argv) {
MPI_BANNER("MPIO special collective io test...");
ret_code = test_mpio_special_collective(filenames[0]);
- sc_finish: ret_code = errors_sum(ret_code);
+sc_finish:
+ ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
- finish:
+finish:
/* make sure all processes are finished before final report, cleanup
- * and exit.
- */
+ * and exit.
+ */
MPI_Barrier(MPI_COMM_WORLD);
if (MAINPROCESS) { /* only process 0 reports */
HDprintf("===================================\n");
@@ -1213,4 +1206,3 @@ int main(int argc, char **argv) {
/* cannot just return (nerrors) because exit code is limited to 1byte */
return (nerrors != 0);
}
-
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 27b561b..f21726a 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -23,17 +23,12 @@
*/
#include "h5test.h"
-const char *FILENAME[] = {
- "flush",
- "noflush",
- NULL
-};
+const char *FILENAME[] = {"flush", "noflush", NULL};
-static int data_g[100][100];
+static int data_g[100][100];
-#define N_GROUPS 100
+#define N_GROUPS 100
-
/*-------------------------------------------------------------------------
* Function: create_test_file
*
@@ -50,51 +45,51 @@ static int data_g[100][100];
static hid_t
create_test_file(char *name, hid_t fapl_id)
{
- hid_t fid = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- hid_t sid = H5I_INVALID_HID;
- hid_t did = H5I_INVALID_HID;
- hid_t top_level_gid = H5I_INVALID_HID;
- hid_t gid = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hsize_t dims[2] = {100, 100};
- hsize_t chunk_dims[2] = {5, 5};
- hsize_t i, j;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {100, 100};
+ hsize_t chunk_dims[2] = {5, 5};
+ hsize_t i, j;
- if((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+ if ((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
goto error;
/* Create a chunked dataset */
- if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
goto error;
- if(H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
+ if (H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
goto error;
- if((sid = H5Screate_simple(2, dims, NULL)) < 0)
+ if ((sid = H5Screate_simple(2, dims, NULL)) < 0)
goto error;
- if((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ if ((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
goto error;
- if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
goto error;
/* Write some data */
- for(i = 0; i < dims[0]; i++)
- for(j = 0; j < dims[1]; j++)
+ for (i = 0; i < dims[0]; i++)
+ for (j = 0; j < dims[1]; j++)
data_g[i][j] = (int)(i + (i * j) + j);
- if(H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ if (H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
goto error;
/* Create some groups */
- if((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
- for(i = 0; i < N_GROUPS; i++) {
+ for (i = 0; i < N_GROUPS; i++) {
HDsprintf(name, "grp%02u", (unsigned)i);
- if((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
- if(H5Gclose(gid) < 0)
+ if (H5Gclose(gid) < 0)
goto error;
}
@@ -104,7 +99,6 @@ error:
return H5I_INVALID_HID;
} /* end create_test_file() */
-
/*-------------------------------------------------------------------------
* Function: main
*
@@ -118,33 +112,33 @@ error:
*-------------------------------------------------------------------------
*/
int
-main(int argc, char* argv[])
+main(int argc, char *argv[])
{
- hid_t fid1 = H5I_INVALID_HID;
- hid_t fid2 = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- MPI_File *mpifh_p = NULL;
+ hid_t fid1 = H5I_INVALID_HID;
+ hid_t fid2 = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ MPI_File * mpifh_p = NULL;
char name[1024];
- const char *envval = NULL;
+ const char *envval = NULL;
int mpi_size;
int mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(mpi_rank == 0)
+ if (mpi_rank == 0)
TESTING("H5Fflush (part1)");
/* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
- if(envval == NULL)
+ if (envval == NULL)
envval = "nomatch";
- if(!HDstrcmp(envval, "split")) {
- if(mpi_rank == 0) {
+ if (!HDstrcmp(envval, "split")) {
+ if (mpi_rank == 0) {
SKIPPED();
HDputs(" Test not compatible with current Virtual File Driver");
}
@@ -152,25 +146,25 @@ main(int argc, char* argv[])
HDexit(EXIT_FAILURE);
}
- if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
goto error;
- if(H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
+ if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
goto error;
/* Create the file */
h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
- if((fid1 = create_test_file(name, fapl_id)) < 0)
+ if ((fid1 = create_test_file(name, fapl_id)) < 0)
goto error;
/* Flush and exit without closing the library */
- if(H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
+ if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
goto error;
/* Create the other file which will not be flushed */
h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
- if((fid2 = create_test_file(name, fapl_id)) < 0)
+ if ((fid2 = create_test_file(name, fapl_id)) < 0)
goto error;
- if(mpi_rank == 0)
+ if (mpi_rank == 0)
PASSED();
HDfflush(stdout);
@@ -184,15 +178,15 @@ main(int argc, char* argv[])
*/
/* Close file 1 */
- if(H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
+ if (H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
goto error;
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
goto error;
/* Close file 2 */
- if(H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
+ if (H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
goto error;
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
goto error;
HDfflush(stdout);
@@ -215,4 +209,3 @@ error:
HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
HD_exit(EXIT_FAILURE);
} /* end main() */
-
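
The part-1 test above deliberately never calls H5Fclose(): it flushes the files and then closes the raw MPI-IO handles so that MPI_Finalize() can complete. Reduced to its essentials, and assuming fid and fapl_id are a valid parallel file and its MPI-IO access property list, the maneuver is:

/* sketch only: flush an open parallel HDF5 file, then close the MPI handle
 * behind HDF5's back instead of calling H5Fclose() */
#include "hdf5.h"
#include <mpi.h>

static int flush_and_drop(hid_t fid, hid_t fapl_id)
{
    MPI_File *mpifh_p = NULL;

    if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) /* push everything to the file */
        return 1;
    if (H5Fget_vfd_handle(fid, fapl_id, (void **)&mpifh_p) < 0) /* borrow the MPI-IO handle */
        return 1;
    if (MPI_File_close(mpifh_p) != MPI_SUCCESS) /* close it so MPI_Finalize() is happy */
        return 1;
    return 0;
}
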
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index f4589c8..20f8a95 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -24,15 +24,11 @@
#include "h5test.h"
-const char *FILENAME[] = {
- "flush",
- "noflush",
- NULL
-};
+const char *FILENAME[] = {"flush", "noflush", NULL};
-static int data_g[100][100];
+static int data_g[100][100];
-#define N_GROUPS 100
+#define N_GROUPS 100
/*-------------------------------------------------------------------------
* Function: check_test_file
@@ -47,41 +43,41 @@ static int data_g[100][100];
*-------------------------------------------------------------------------
*/
static herr_t
-check_test_file(char* name, hid_t fapl_id)
+check_test_file(char *name, hid_t fapl_id)
{
- hid_t fid = H5I_INVALID_HID;
- hid_t sid = H5I_INVALID_HID;
- hid_t did = H5I_INVALID_HID;
- hid_t top_level_gid = H5I_INVALID_HID;
- hid_t gid = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hsize_t dims[2];
- int val;
- hsize_t i, j;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2];
+ int val;
+ hsize_t i, j;
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
goto error;
- if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
goto error;
- if((fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id)) < 0)
+ if ((fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id)) < 0)
goto error;
/* Open the dataset */
- if((did = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
+ if ((did = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
goto error;
- if((sid = H5Dget_space(did)) < 0)
+ if ((sid = H5Dget_space(did)) < 0)
goto error;
- if(H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+ if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
goto error;
HDassert(100 == dims[0] && 100 == dims[1]);
/* Read some data */
- if(H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ if (H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
goto error;
- for(i = 0; i < dims[0]; i++) {
- for(j = 0; j < dims[1]; j++) {
+ for (i = 0; i < dims[0]; i++) {
+ for (j = 0; j < dims[1]; j++) {
val = (int)(i + (i * j) + j);
- if(data_g[i][j] != val) {
+ if (data_g[i][j] != val) {
H5_FAILED();
HDprintf(" data_g[%lu][%lu] = %d\n", (unsigned long)i, (unsigned long)j, data_g[i][j]);
HDprintf(" should be %d\n", val);
@@ -90,38 +86,40 @@ check_test_file(char* name, hid_t fapl_id)
}
/* Open some groups */
- if((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
+ if ((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
goto error;
- for(i = 0; i < N_GROUPS; i++) {
+ for (i = 0; i < N_GROUPS; i++) {
HDsprintf(name, "grp%02u", (unsigned)i);
- if((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
+ if ((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
goto error;
- if(H5Gclose(gid) < 0)
+ if (H5Gclose(gid) < 0)
goto error;
}
- if(H5Gclose(top_level_gid) < 0)
+ if (H5Gclose(top_level_gid) < 0)
goto error;
- if(H5Dclose(did) < 0)
+ if (H5Dclose(did) < 0)
goto error;
- if(H5Fclose(fid) < 0)
+ if (H5Fclose(fid) < 0)
goto error;
- if(H5Pclose(dxpl_id) < 0)
+ if (H5Pclose(dxpl_id) < 0)
goto error;
- if(H5Sclose(sid) < 0)
+ if (H5Sclose(sid) < 0)
goto error;
return SUCCEED;
error:
- H5E_BEGIN_TRY {
+ H5E_BEGIN_TRY
+ {
H5Pclose(dxpl_id);
H5Gclose(top_level_gid);
H5Dclose(did);
H5Fclose(fid);
H5Sclose(sid);
H5Gclose(gid);
- } H5E_END_TRY;
+ }
+ H5E_END_TRY;
return FAIL;
} /* end check_test_file() */
@@ -140,31 +138,31 @@ error:
int
main(int argc, char *argv[])
{
- hid_t fapl_id1 = H5I_INVALID_HID;
- hid_t fapl_id2 = H5I_INVALID_HID;
+ hid_t fapl_id1 = H5I_INVALID_HID;
+ hid_t fapl_id2 = H5I_INVALID_HID;
H5E_auto2_t func;
char name[1024];
const char *envval = NULL;
- int mpi_size;
- int mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(mpi_rank == 0)
+ if (mpi_rank == 0)
TESTING("H5Fflush (part2 with flush)");
/* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
- if(envval == NULL)
+ if (envval == NULL)
envval = "nomatch";
- if(!HDstrcmp(envval, "split")) {
- if(mpi_rank == 0) {
+ if (!HDstrcmp(envval, "split")) {
+ if (mpi_rank == 0) {
SKIPPED();
HDputs(" Test not compatible with current Virtual File Driver");
}
@@ -172,37 +170,37 @@ main(int argc, char *argv[])
HDexit(EXIT_FAILURE);
}
- if((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ if ((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
goto error;
- if(H5Pset_fapl_mpio(fapl_id1, comm, info) < 0)
+ if (H5Pset_fapl_mpio(fapl_id1, comm, info) < 0)
goto error;
- if((fapl_id2 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ if ((fapl_id2 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
goto error;
- if(H5Pset_fapl_mpio(fapl_id2, comm, info) < 0)
+ if (H5Pset_fapl_mpio(fapl_id2, comm, info) < 0)
goto error;
/* Check the case where the file was flushed */
h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name));
- if(check_test_file(name, fapl_id1)) {
+ if (check_test_file(name, fapl_id1)) {
H5_FAILED()
goto error;
}
- else if(mpi_rank == 0) {
+ else if (mpi_rank == 0) {
PASSED();
}
/* Check the case where the file was not flushed. This should give an error
* so we turn off the error stack temporarily.
*/
- if(mpi_rank == 0)
+ if (mpi_rank == 0)
TESTING("H5Fflush (part2 without flush)");
- H5Eget_auto2(H5E_DEFAULT,&func, NULL);
+ H5Eget_auto2(H5E_DEFAULT, &func, NULL);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
- if(check_test_file(name, fapl_id2)) {
- if(mpi_rank == 0)
+ if (check_test_file(name, fapl_id2)) {
+ if (mpi_rank == 0)
PASSED();
}
else {
@@ -222,4 +220,3 @@ main(int argc, char *argv[])
error:
HDexit(EXIT_FAILURE);
} /* end main() */
-
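
Part 2 expects the open of the un-flushed file to fail, so it temporarily disables automatic error printing around that check. The save/disable/restore idiom it relies on can be sketched like this (hypothetical helper name, default error stack only):

/* sketch only: silence the HDF5 error stack around an operation that is
 * allowed to fail, then restore the previous handler */
#include "hdf5.h"

static int open_quietly(const char *name, hid_t fapl_id)
{
    H5E_auto2_t func;
    void *      client_data;
    hid_t       fid;

    H5Eget_auto2(H5E_DEFAULT, &func, &client_data); /* remember the current handler */
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);          /* stop automatic error printing */

    fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id); /* may legitimately fail */

    H5Eset_auto2(H5E_DEFAULT, func, client_data); /* restore error reporting */

    if (fid >= 0)
        H5Fclose(fid);
    return (fid >= 0) ? 0 : 1; /* 1 == the open failed */
}
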
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 73d262e..6b2bfe3 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -17,7 +17,6 @@
#include "testphdf5.h"
-
/*-------------------------------------------------------------------------
* Function: test_fapl_mpio_dup
*
@@ -36,23 +35,23 @@
void
test_fapl_mpio_dup(void)
{
- int mpi_size, mpi_rank;
+ int mpi_size, mpi_rank;
MPI_Comm comm, comm_tmp;
- int mpi_size_old, mpi_rank_old;
- int mpi_size_tmp, mpi_rank_tmp;
- MPI_Info info = MPI_INFO_NULL;
+ int mpi_size_old, mpi_rank_old;
+ int mpi_size_tmp, mpi_rank_tmp;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Info info_tmp = MPI_INFO_NULL;
- int mrc; /* MPI return value */
- hid_t acc_pl; /* File access properties */
- herr_t ret; /* HDF5 return value */
- int nkeys, nkeys_tmp;
+ int mrc; /* MPI return value */
+ hid_t acc_pl; /* File access properties */
+ herr_t ret; /* HDF5 return value */
+ int nkeys, nkeys_tmp;
if (VERBOSE_MED)
HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n");
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (VERBOSE_MED)
HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
@@ -148,8 +147,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm_tmp, &mpi_size_tmp);
MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
if (VERBOSE_MED)
- HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
- mpi_rank_tmp, mpi_size_tmp);
+ HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
if (MPI_INFO_NULL != info_tmp) {
@@ -168,8 +166,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm_tmp, &mpi_size_tmp);
MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
if (VERBOSE_MED)
- HDprintf("After Property list closed: rank/size of comm are %d/%d\n",
- mpi_rank_tmp, mpi_size_tmp);
+ HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
if (MPI_INFO_NULL != info_tmp) {
mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
@@ -185,4 +182,3 @@ test_fapl_mpio_dup(void)
VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
}
} /* end test_fapl_mpio_dup() */
-
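
test_fapl_mpio_dup() verifies that the MPI-IO file access property list keeps its own duplicates of the communicator and Info object. The behavior it relies on can be sketched as follows (hypothetical helper, error checking omitted); note that the caller also owns, and must free, the duplicates returned by H5Pget_fapl_mpio():

/* sketch only: the duplication semantics of H5Pset_fapl_mpio()/H5Pget_fapl_mpio() */
#include "hdf5.h"
#include <mpi.h>

static int check_fapl_mpio_dup(void)
{
    hid_t    fapl;
    MPI_Comm comm, comm_out;
    MPI_Info info, info_out;
    int      size_out;

    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Info_create(&info);

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, comm, info); /* the fapl stores its own duplicates */

    MPI_Comm_free(&comm); /* the caller's copies can go away ... */
    MPI_Info_free(&info);

    H5Pget_fapl_mpio(fapl, &comm_out, &info_out); /* ... yet these still work */
    MPI_Comm_size(comm_out, &size_out);

    MPI_Comm_free(&comm_out); /* the caller owns the returned duplicates */
    MPI_Info_free(&info_out);
    H5Pclose(fapl);
    return 0;
}
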
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index ba4165e..78659d4 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -27,24 +27,20 @@
* or to read and validate.
*/
#define NFILENAME 3
-const char *FILENAMES[NFILENAME + 1]={"reloc_t_pread_data_file",
- "reloc_t_pread_group_0_file",
- "reloc_t_pread_group_1_file",
- NULL};
+const char *FILENAMES[NFILENAME + 1] = {"reloc_t_pread_data_file", "reloc_t_pread_group_0_file",
+ "reloc_t_pread_group_1_file", NULL};
#define FILENAME_BUF_SIZE 1024
#define COUNT 1000
#define LIMIT_NPROC 6
-hbool_t pass = true;
-static const char *random_hdf5_text =
-"Now is the time for all first-time-users of HDF5 to read their \
+hbool_t pass = true;
+static const char *random_hdf5_text = "Now is the time for all first-time-users of HDF5 to read their \
manual or go thru the tutorials!\n\
While you\'re at it, now is also the time to read up on MPI-IO.";
-static const char *hitchhiker_quote =
-"A common mistake that people make when trying to design something\n\
+static const char *hitchhiker_quote = "A common mistake that people make when trying to design something\n\
completely foolproof is to underestimate the ingenuity of complete\n\
fools.\n";
@@ -53,7 +49,6 @@ static int test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int gro
static char *test_argv0 = NULL;
-
/*-------------------------------------------------------------------------
* Function: generate_test_file
*
@@ -88,50 +83,50 @@ static char *test_argv0 = NULL;
*-------------------------------------------------------------------------
*/
static int
-generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
+generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
{
- int header = -1;
- const char *fcn_name = "generate_test_file()";
- const char *failure_mssg = NULL;
+ int header = -1;
+ const char *fcn_name = "generate_test_file()";
+ const char *failure_mssg = NULL;
const char *group_filename = NULL;
- char data_filename[FILENAME_BUF_SIZE];
- int file_index = 0;
- int group_size;
- int group_rank;
- int local_failure = 0;
- int global_failures = 0;
- hsize_t count = COUNT;
- hsize_t i;
- hsize_t offset;
- hsize_t dims[1] = {0};
- hid_t file_id = -1;
- hid_t memspace = -1;
- hid_t filespace = -1;
- hid_t fctmpl = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t dset_id = -1;
- hid_t dset_id_ch = -1;
- hid_t dcpl_id = H5P_DEFAULT;
- hsize_t chunk[1];
- float nextValue;
- float *data_slice = NULL;
+ char data_filename[FILENAME_BUF_SIZE];
+ int file_index = 0;
+ int group_size;
+ int group_rank;
+ int local_failure = 0;
+ int global_failures = 0;
+ hsize_t count = COUNT;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t dims[1] = {0};
+ hid_t file_id = -1;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
+ hid_t fctmpl = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dcpl_id = H5P_DEFAULT;
+ hsize_t chunk[1];
+ float nextValue;
+ float * data_slice = NULL;
pass = true;
HDassert(comm != MPI_COMM_NULL);
- if ( (MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
- pass = FALSE;
+ if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
+ pass = FALSE;
failure_mssg = "generate_test_file: MPI_Comm_rank failed.\n";
}
- if ( (MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
- pass = FALSE;
+ if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
+ pass = FALSE;
failure_mssg = "generate_test_file: MPI_Comm_size failed.\n";
}
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDfprintf(stdout, "Constructing test files...");
}
@@ -143,14 +138,14 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* file construction. The reading portion of the test
* will have a similar setup process...
*/
- if ( pass ) {
- if ( comm == MPI_COMM_WORLD ) { /* Test 1 */
+ if (pass) {
+ if (comm == MPI_COMM_WORLD) { /* Test 1 */
file_index = 0;
}
- else if ( group_id == 0 ) { /* Test 2 group 0 */
+ else if (group_id == 0) { /* Test 2 group 0 */
file_index = 1;
}
- else { /* Test 2 group 1 */
+ else { /* Test 2 group 1 */
file_index = 2;
}
@@ -159,220 +154,212 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* need to worry that we reassign it for each file!
*/
group_filename = FILENAMES[file_index];
- HDassert( group_filename );
+ HDassert(group_filename);
/* Assign the 'data_filename' */
- if ( h5_fixname(group_filename, H5P_DEFAULT, data_filename,
- sizeof(data_filename)) == NULL ) {
- pass = FALSE;
+ if (h5_fixname(group_filename, H5P_DEFAULT, data_filename, sizeof(data_filename)) == NULL) {
+ pass = FALSE;
failure_mssg = "h5_fixname(0) failed.\n";
}
}
/* setup data to write */
- if ( pass ) {
- if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
- pass = FALSE;
+ if (pass) {
+ if ((data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL) {
+ pass = FALSE;
failure_mssg = "malloc of data_slice failed.\n";
}
}
- if ( pass ) {
+ if (pass) {
nextValue = (float)(mpi_rank * COUNT);
- for(i=0; i<COUNT; i++) {
+ for (i = 0; i < COUNT; i++) {
data_slice[i] = nextValue;
nextValue += 1;
}
}
- /* Initialize a file creation template */
- if (pass) {
- if ((fctmpl = H5Pcreate(H5P_FILE_CREATE)) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
- }
- else if (H5Pset_userblock(fctmpl, 512) != SUCCEED) {
- pass = FALSE;
- failure_mssg = "H5Pset_userblock(,size) failed.\n";
- }
- }
+ /* Initialize a file creation template */
+ if (pass) {
+ if ((fctmpl = H5Pcreate(H5P_FILE_CREATE)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
+ }
+ else if (H5Pset_userblock(fctmpl, 512) != SUCCEED) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_userblock(,size) failed.\n";
+ }
+ }
/* setup FAPL */
- if ( pass ) {
- if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
}
}
- if ( pass ) {
- if ( (H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pset_fapl_mpio() failed\n";
}
}
/* create the data file */
- if ( pass ) {
- if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
- fctmpl, fapl_id)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC, fctmpl, fapl_id)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Fcreate() failed.\n";
}
}
/* create and write the dataset */
- if ( pass ) {
- if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
}
}
- if ( pass ) {
- if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
}
}
- if ( pass ) {
+ if (pass) {
dims[0] = COUNT;
- if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
- pass = FALSE;
+ if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n";
}
}
- if ( pass ) {
+ if (pass) {
dims[0] *= (hsize_t)group_size;
- if ( (filespace = H5Screate_simple(1, dims, NULL)) < 0 ) {
- pass = FALSE;
+ if ((filespace = H5Screate_simple(1, dims, NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n";
}
}
- if ( pass ) {
+ if (pass) {
offset = (hsize_t)group_rank * (hsize_t)COUNT;
- if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
- NULL, &count, NULL)) < 0 ) {
- pass = FALSE;
+ if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed.\n";
}
}
- if ( pass ) {
- if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
- filespace, H5P_DEFAULT, H5P_DEFAULT,
- H5P_DEFAULT)) < 0 ) {
- pass = false;
+ if (pass) {
+ if ((dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ pass = false;
failure_mssg = "H5Dcreate2() failed.\n";
}
}
- if ( pass ) {
- if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
- filespace, dxpl_id, data_slice)) < 0 ) {
- pass = false;
+ if (pass) {
+ if ((H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
+ pass = false;
failure_mssg = "H5Dwrite() failed.\n";
}
}
-
/* create a chunked dataset */
- chunk[0] = COUNT/8;
+ chunk[0] = COUNT / 8;
- if ( pass ) {
- if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
- pass = false;
- failure_mssg = "H5Pcreate() failed.\n";
- }
+ if (pass) {
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ pass = false;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
}
- if ( pass ) {
- if ( (H5Pset_chunk (dcpl_id, 1, chunk) ) < 0 ) {
- pass = false;
- failure_mssg = "H5Pset_chunk() failed.\n";
- }
+ if (pass) {
+ if ((H5Pset_chunk(dcpl_id, 1, chunk)) < 0) {
+ pass = false;
+ failure_mssg = "H5Pset_chunk() failed.\n";
+ }
}
- if ( pass ) {
+ if (pass) {
- if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
- filespace, H5P_DEFAULT, dcpl_id,
- H5P_DEFAULT)) < 0 ) {
- pass = false;
- failure_mssg = "H5Dcreate2() failed.\n";
- }
+ if ((dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT)) < 0) {
+ pass = false;
+ failure_mssg = "H5Dcreate2() failed.\n";
+ }
}
- if ( pass ) {
- if ( (H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace,
- filespace, dxpl_id, data_slice)) < 0 ) {
- pass = false;
- failure_mssg = "H5Dwrite() failed.\n";
- }
+ if (pass) {
+ if ((H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
+ pass = false;
+ failure_mssg = "H5Dwrite() failed.\n";
+ }
}
- if ( pass || (dcpl_id != -1)) {
- if ( H5Pclose(dcpl_id) < 0 ) {
- pass = false;
- failure_mssg = "H5Pclose(dcpl_id) failed.\n";
- }
+ if (pass || (dcpl_id != -1)) {
+ if (H5Pclose(dcpl_id) < 0) {
+ pass = false;
+ failure_mssg = "H5Pclose(dcpl_id) failed.\n";
+ }
}
- if ( pass || (dset_id_ch != -1)) {
- if ( H5Dclose(dset_id_ch) < 0 ) {
- pass = false;
- failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
- }
+ if (pass || (dset_id_ch != -1)) {
+ if (H5Dclose(dset_id_ch) < 0) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+ }
}
/* close file, etc. */
- if ( pass || (dset_id != -1)) {
- if ( H5Dclose(dset_id) < 0 ) {
- pass = false;
+ if (pass || (dset_id != -1)) {
+ if (H5Dclose(dset_id) < 0) {
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
- if ( pass || (memspace != -1) ) {
- if ( H5Sclose(memspace) < 0 ) {
- pass = false;
+ if (pass || (memspace != -1)) {
+ if (H5Sclose(memspace) < 0) {
+ pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
- if ( pass || (filespace != -1) ) {
- if ( H5Sclose(filespace) < 0 ) {
- pass = false;
+ if (pass || (filespace != -1)) {
+ if (H5Sclose(filespace) < 0) {
+ pass = false;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
- if ( pass || (file_id != -1) ) {
- if ( H5Fclose(file_id) < 0 ) {
- pass = false;
+ if (pass || (file_id != -1)) {
+ if (H5Fclose(file_id) < 0) {
+ pass = false;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
- if ( pass || (dxpl_id != -1) ) {
- if ( H5Pclose(dxpl_id) < 0 ) {
- pass = false;
+ if (pass || (dxpl_id != -1)) {
+ if (H5Pclose(dxpl_id) < 0) {
+ pass = false;
failure_mssg = "H5Pclose(dxpl_id) failed.\n";
}
}
- if ( pass || (fapl_id != -1) ) {
- if ( H5Pclose(fapl_id) < 0 ) {
- pass = false;
+ if (pass || (fapl_id != -1)) {
+ if (H5Pclose(fapl_id) < 0) {
+ pass = false;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
if (pass || (fctmpl != -1)) {
if (H5Pclose(fctmpl) < 0) {
- pass = false;
+ pass = false;
failure_mssg = "H5Pclose(fctmpl) failed.\n";
}
}
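
The dataset-creation code reformatted above follows the usual parallel-HDF5 decomposition: each rank owns a contiguous COUNT-element slice of a 1-D dataset at offset rank*COUNT and transfers it with a collective write. A condensed sketch of the write side (SLICE standing in for COUNT, error handling dropped):

/* sketch only: per-rank hyperslab write into a shared 1-D float dataset */
#include "hdf5.h"

#define SLICE 1000 /* plays the role of COUNT in t_pread.c */

static int write_my_slice(hid_t file_id, int group_rank, int group_size, const float *data_slice)
{
    hsize_t count   = SLICE;
    hsize_t dims[1] = {SLICE};
    hsize_t offset;
    hid_t   memspace, filespace, dset_id, dxpl_id;

    memspace = H5Screate_simple(1, dims, NULL); /* my in-memory slice */
    dims[0] *= (hsize_t)group_size;
    filespace = H5Screate_simple(1, dims, NULL); /* whole dataset in the file */

    offset = (hsize_t)group_rank * SLICE;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL);

    dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT,
                         H5P_DEFAULT);

    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); /* collective transfer */
    H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice);

    H5Pclose(dxpl_id);
    H5Dclose(dset_id);
    H5Sclose(filespace);
    H5Sclose(memspace);
    return 0;
}
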
@@ -392,7 +379,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
*/
if (group_rank == 0) {
const char *text_to_write;
- size_t bytes_to_write;
+ size_t bytes_to_write;
if (group_id == 0)
text_to_write = random_hdf5_text;
@@ -402,8 +389,8 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
bytes_to_write = HDstrlen(text_to_write);
if (pass) {
- if ((header = HDopen(data_filename, O_WRONLY)) < 0) {
- pass = FALSE;
+ if ((header = HDopen(data_filename, O_WRONLY)) < 0) {
+ pass = FALSE;
failure_mssg = "HDopen(data_filename, O_WRONLY) failed.\n";
}
}
@@ -411,14 +398,14 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
if (pass) {
HDlseek(header, 0, SEEK_SET);
if (HDwrite(header, text_to_write, bytes_to_write) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "Unable to write user text into file.\n";
- }
+ }
}
if (pass || (header > 0)) {
if (HDclose(header) < 0) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "HDclose() failed.\n";
}
}
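
Rank 0 then prepends the user text by reopening the finished file with plain POSIX calls and writing into the 512-byte userblock reserved at creation time (the H5Pset_userblock() call earlier in this diff); HDF5 never touches that region, so the file stays valid. A stripped-down sketch of the idea, assuming the text is shorter than the userblock:

/* sketch only: create a file with a userblock, then drop text into it */
#include "hdf5.h"
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int add_userblock_text(const char *filename, const char *text)
{
    hid_t fcpl, fid;
    int   fd;

    fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5Pset_userblock(fcpl, 512); /* reserve 512 bytes ahead of the HDF5 data */

    fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    /* ... create groups/datasets here ... */
    H5Fclose(fid);
    H5Pclose(fcpl);

    /* Write the text into the userblock with ordinary POSIX I/O. */
    if ((fd = open(filename, O_WRONLY)) < 0)
        return 1;
    lseek(fd, 0, SEEK_SET);
    if (write(fd, text, strlen(text)) < 0) {
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}
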
@@ -428,42 +415,41 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* Only overwrite the failure message if no previous error
* has been detected
*/
- local_failure = ( pass ? 0 : 1 );
+ local_failure = (pass ? 0 : 1);
/* This is a global all reduce (NOT group specific) */
- if ( MPI_Allreduce(&local_failure, &global_failures, 1,
- MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
- if ( pass ) {
- pass = FALSE;
+ if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
+ if (pass) {
+ pass = FALSE;
failure_mssg = "MPI_Allreduce() failed.\n";
}
- } else if ( ( pass ) && ( global_failures > 0 ) ) {
- pass = FALSE;
+ }
+ else if ((pass) && (global_failures > 0)) {
+ pass = FALSE;
failure_mssg = "One or more processes report failure.\n";
}
/* report results */
- if ( mpi_rank == 0 ) {
- if ( pass ) {
+ if (mpi_rank == 0) {
+ if (pass) {
HDfprintf(stdout, "Done.\n");
- } else {
+ }
+ else {
HDfprintf(stdout, "FAILED.\n");
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
- fcn_name, failure_mssg);
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
}
/* free data_slice if it has been allocated */
- if ( data_slice != NULL ) {
+ if (data_slice != NULL) {
HDfree(data_slice);
data_slice = NULL;
}
- return(! pass);
+ return (!pass);
} /* generate_test_file() */
-
/*-------------------------------------------------------------------------
* Function: test_parallel_read
*
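
The pass/failure_mssg idiom that closes generate_test_file() folds each rank's local status into an MPI_Allreduce over MPI_COMM_WORLD, so every rank agrees on success or failure before reporting. Read in isolation, that aggregation step is a small helper; a hedged sketch, assuming the caller maintains pass exactly as the test does (the helper name is illustrative):

#include <mpi.h>
#include <stdbool.h>

/* Combine per-rank pass/fail status: after the Allreduce every rank
 * knows how many ranks in the whole job (not just the sub-group)
 * reported a failure. */
static bool
all_ranks_passed(bool pass)
{
    int local_failure   = pass ? 0 : 1;
    int global_failures = 0;

    if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT,
                      MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS)
        return false; /* treat a communication error as a failure */

    return (global_failures == 0);
}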
@@ -506,46 +492,46 @@ static int
test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
{
const char *failure_mssg;
- const char *fcn_name = "test_parallel_read()";
+ const char *fcn_name = "test_parallel_read()";
const char *group_filename = NULL;
- char reloc_data_filename[FILENAME_BUF_SIZE];
- int local_failure = 0;
- int global_failures = 0;
- int group_size;
- int group_rank;
- hid_t fapl_id = -1;
- hid_t file_id = -1;
- hid_t dset_id = -1;
- hid_t dset_id_ch = -1;
- hid_t dxpl_id = H5P_DEFAULT;
- hid_t memspace = -1;
- hid_t filespace = -1;
- hid_t filetype = -1;
- size_t filetype_size;
- hssize_t dset_size;
- hsize_t i;
- hsize_t offset;
- hsize_t count = COUNT;
- hsize_t dims[1] = {0};
- float nextValue;
- float *data_slice = NULL;
+ char reloc_data_filename[FILENAME_BUF_SIZE];
+ int local_failure = 0;
+ int global_failures = 0;
+ int group_size;
+ int group_rank;
+ hid_t fapl_id = -1;
+ hid_t file_id = -1;
+ hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dxpl_id = H5P_DEFAULT;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
+ hid_t filetype = -1;
+ size_t filetype_size;
+ hssize_t dset_size;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t count = COUNT;
+ hsize_t dims[1] = {0};
+ float nextValue;
+ float * data_slice = NULL;
pass = TRUE;
HDassert(comm != MPI_COMM_NULL);
- if ( (MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
- pass = FALSE;
+ if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
+ pass = FALSE;
failure_mssg = "test_parallel_read: MPI_Comm_rank failed.\n";
}
- if ( (MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
- pass = FALSE;
+ if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
+ pass = FALSE;
failure_mssg = "test_parallel_read: MPI_Comm_size failed.\n";
}
- if ( mpi_rank == 0 ) {
- if ( comm == MPI_COMM_WORLD ) {
+ if (mpi_rank == 0) {
+ if (comm == MPI_COMM_WORLD) {
TESTING("parallel file open test 1");
}
else {
@@ -554,123 +540,118 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
}
/* allocate space for the data_slice array */
- if ( pass ) {
- if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
- pass = FALSE;
+ if (pass) {
+ if ((data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL) {
+ pass = FALSE;
failure_mssg = "malloc of data_slice failed.\n";
}
}
-
/* Select the file name to read
* Please see the comments in the 'generate_test_file' function
* for more details...
*/
- if ( pass ) {
+ if (pass) {
- if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ if (comm == MPI_COMM_WORLD) /* test 1 */
group_filename = FILENAMES[0];
- else if ( group_id == 0 ) /* test 2 group 0 */
+ else if (group_id == 0) /* test 2 group 0 */
group_filename = FILENAMES[1];
- else /* test 2 group 1 */
+ else /* test 2 group 1 */
group_filename = FILENAMES[2];
HDassert(group_filename);
- if ( h5_fixname(group_filename, H5P_DEFAULT, reloc_data_filename,
- sizeof(reloc_data_filename)) == NULL ) {
+ if (h5_fixname(group_filename, H5P_DEFAULT, reloc_data_filename, sizeof(reloc_data_filename)) ==
+ NULL) {
- pass = FALSE;
+ pass = FALSE;
failure_mssg = "h5_fixname(1) failed.\n";
}
}
/* setup FAPL */
- if ( pass ) {
- if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
}
}
- if ( pass ) {
- if ( (H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Pset_fapl_mpio() failed\n";
}
}
/* open the file -- should have user block, exercising the optimization */
- if ( pass ) {
- if ( (file_id = H5Fopen(reloc_data_filename,
- H5F_ACC_RDONLY, fapl_id)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((file_id = H5Fopen(reloc_data_filename, H5F_ACC_RDONLY, fapl_id)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Fopen() failed\n";
}
}
/* open the data set */
- if ( pass ) {
- if ( (dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Dopen2() failed\n";
}
}
/* open the chunked data set */
- if ( pass ) {
- if ( (dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dopen2() failed\n";
- }
+ if (pass) {
+ if ((dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed\n";
+ }
}
/* setup memspace */
- if ( pass ) {
+ if (pass) {
dims[0] = count;
- if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
- pass = FALSE;
+ if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
}
}
/* setup filespace */
- if ( pass ) {
- if ( (filespace = H5Dget_space(dset_id)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((filespace = H5Dget_space(dset_id)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Dget_space(dataset) failed\n";
}
}
- if ( pass ) {
+ if (pass) {
offset = (hsize_t)group_rank * count;
- if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
- &offset, NULL, &count, NULL)) < 0 ) {
- pass = FALSE;
+ if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Sselect_hyperslab() failed\n";
}
}
/* read this process's section of the data */
- if ( pass ) {
- if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
- filespace, H5P_DEFAULT, data_slice)) < 0 ) {
- pass = FALSE;
+ if (pass) {
+ if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, data_slice)) < 0) {
+ pass = FALSE;
failure_mssg = "H5Dread() failed\n";
}
}
/* verify the data */
- if ( pass ) {
+ if (pass) {
nextValue = (float)((hsize_t)mpi_rank * count);
- i = 0;
- while ( ( pass ) && ( i < count ) ) {
+ i = 0;
+ while ((pass) && (i < count)) {
/* what we really want is data_slice[i] != nextValue --
* the following is a circumlocution to shut up
* the compiler.
*/
- if ( ( data_slice[i] > nextValue ) ||
- ( data_slice[i] < nextValue ) ) {
- pass = FALSE;
+ if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+ pass = FALSE;
failure_mssg = "Unexpected dset contents.\n";
}
nextValue += 1;
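
The first read in test_parallel_read() is the standard contiguous decomposition: rank r of the group selects count elements at offset r*count in the 1-D file dataspace and reads them into a same-sized memory space. A condensed sketch of just that selection and read, with the dataset handle, count, and buffer assumed to be prepared as in the test (the helper name is illustrative):

#include "hdf5.h"

/* Read this rank's contiguous slice of a 1-D float dataset. */
static herr_t
read_my_slice(hid_t dset_id, int group_rank, hsize_t count, float *buf)
{
    hsize_t dims[1]   = {count};
    hsize_t offset    = (hsize_t)group_rank * count;
    hid_t   memspace  = H5Screate_simple(1, dims, NULL);
    hid_t   filespace = H5Dget_space(dset_id);
    herr_t  ret       = -1;

    if (memspace >= 0 && filespace >= 0 &&
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL) >= 0)
        ret = H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, buf);

    if (filespace >= 0)
        H5Sclose(filespace);
    if (memspace >= 0)
        H5Sclose(memspace);
    return ret;
}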
@@ -678,22 +659,22 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
}
}
- if ( pass || (memspace != -1) ) {
- if ( H5Sclose(memspace) < 0 ) {
- pass = false;
+ if (pass || (memspace != -1)) {
+ if (H5Sclose(memspace) < 0) {
+ pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
- if ( pass || (filespace != -1) ) {
- if ( H5Sclose(filespace) < 0 ) {
- pass = false;
+ if (pass || (filespace != -1)) {
+ if (H5Sclose(filespace) < 0) {
+ pass = false;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
/* free data_slice if it has been allocated */
- if ( data_slice != NULL ) {
+ if (data_slice != NULL) {
HDfree(data_slice);
data_slice = NULL;
}
@@ -704,328 +685,321 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
- if( group_size <= LIMIT_NPROC ) {
+ if (group_size <= LIMIT_NPROC) {
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ hbool_t prop_value;
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- if ( (filespace = H5Dget_space(dset_id )) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dget_space failed.\n";
- }
+ if ((filespace = H5Dget_space(dset_id)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_space failed.\n";
+ }
- if ( (dset_size = H5Sget_simple_extent_npoints(filespace)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
- }
+ if ((dset_size = H5Sget_simple_extent_npoints(filespace)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
+ }
- if ( (filetype = H5Dget_type(dset_id)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dget_type failed.\n";
- }
+ if ((filetype = H5Dget_type(dset_id)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_type failed.\n";
+ }
- if ( (filetype_size = H5Tget_size(filetype)) == 0 ) {
- pass = FALSE;
- failure_mssg = "H5Tget_size failed.\n";
- }
+ if ((filetype_size = H5Tget_size(filetype)) == 0) {
+ pass = FALSE;
+ failure_mssg = "H5Tget_size failed.\n";
+ }
- if ( H5Tclose(filetype) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Tclose failed.\n";
- };
+ if (H5Tclose(filetype) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Tclose failed.\n";
+ };
- if ( (data_slice = (float *)HDmalloc((size_t)dset_size*filetype_size)) == NULL ) {
- pass = FALSE;
- failure_mssg = "malloc of data_slice failed.\n";
- }
+ if ((data_slice = (float *)HDmalloc((size_t)dset_size * filetype_size)) == NULL) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
- if ( pass ) {
- if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ if (pass) {
+ if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ }
}
- }
- if ( pass ) {
- if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ if (pass) {
+ if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ }
}
- }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- if(H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
- NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pinsert2() failed\n";
- }
- }
+ if (pass) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if (H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pinsert2() failed\n";
+ }
+ }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* read H5S_ALL section */
- if ( pass ) {
- if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL,
- H5S_ALL, dxpl_id, data_slice)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dread() failed\n";
+ /* read H5S_ALL section */
+ if (pass) {
+ if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
}
- }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = FALSE;
- if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pget() failed\n";
- }
if (pass) {
- if(prop_value != TRUE) {
- pass = FALSE;
- failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
- }
+ prop_value = FALSE;
+ if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if (prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
}
- }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* verify the data */
- if ( pass ) {
-
- if ( comm == MPI_COMM_WORLD ) /* test 1 */
- nextValue = 0;
- else if ( group_id == 0 ) /* test 2 group 0 */
- nextValue = 0;
- else /* test 2 group 1 */
- nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
- i = 0;
- while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
- /* what we really want is data_slice[i] != nextValue --
- * the following is a circumlocution to shut up
- * the compiler.
- */
- if ( ( data_slice[i] > nextValue ) ||
- ( data_slice[i] < nextValue ) ) {
- pass = FALSE;
- failure_mssg = "Unexpected dset contents.\n";
- }
- nextValue += 1;
- i++;
- }
- }
-
- /* read H5S_ALL section for the chunked dataset */
+ /* verify the data */
+ if (pass) {
+
+ if (comm == MPI_COMM_WORLD) /* test 1 */
+ nextValue = 0;
+ else if (group_id == 0) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+ i = 0;
+ while ((pass) && (i < (hsize_t)dset_size)) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ /* read H5S_ALL section for the chunked dataset */
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pset() failed\n";
+ if (pass) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
}
- }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- for ( i = 0; i < (hsize_t)dset_size; i++) {
- data_slice[i] = 0;
- }
- if ( pass ) {
- if ( (H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL,
- H5S_ALL, dxpl_id, data_slice)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dread() failed\n";
+ for (i = 0; i < (hsize_t)dset_size; i++) {
+ data_slice[i] = 0;
+ }
+ if (pass) {
+ if ((H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
}
- }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = FALSE;
- if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pget() failed\n";
- }
if (pass) {
- if(prop_value == TRUE) {
- pass = FALSE;
- failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
- }
+ prop_value = FALSE;
+ if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if (prop_value == TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
+ }
+ }
}
- }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* verify the data */
- if ( pass ) {
-
- if ( comm == MPI_COMM_WORLD ) /* test 1 */
- nextValue = 0;
- else if ( group_id == 0 ) /* test 2 group 0 */
- nextValue = 0;
- else /* test 2 group 1 */
- nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
- i = 0;
- while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
- /* what we really want is data_slice[i] != nextValue --
- * the following is a circumlocution to shut up
- * the compiler.
- */
- if ( ( data_slice[i] > nextValue ) ||
- ( data_slice[i] < nextValue ) ) {
- pass = FALSE;
- failure_mssg = "Unexpected chunked dset contents.\n";
- }
- nextValue += 1;
- i++;
- }
- }
-
- if ( pass || (filespace != -1) ) {
- if ( H5Sclose(filespace) < 0 ) {
- pass = false;
- failure_mssg = "H5Sclose(filespace) failed.\n";
- }
- }
-
- /* free data_slice if it has been allocated */
- if ( data_slice != NULL ) {
- HDfree(data_slice);
- data_slice = NULL;
- }
-
- /*
- * Read an H5S_ALL filespace into a hyperslab defined memory space
- */
-
- if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
- pass = FALSE;
- failure_mssg = "malloc of data_slice failed.\n";
- }
-
- /* setup memspace */
- if ( pass ) {
- dims[0] = (hsize_t)dset_size*2;
- if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
- }
- }
- if ( pass ) {
- offset = (hsize_t)dset_size;
- if ( (H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
- &offset, NULL, &offset, NULL)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Sselect_hyperslab() failed\n";
- }
- }
+ /* verify the data */
+ if (pass) {
+
+ if (comm == MPI_COMM_WORLD) /* test 1 */
+ nextValue = 0;
+ else if (group_id == 0) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+ i = 0;
+ while ((pass) && (i < (hsize_t)dset_size)) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+ pass = FALSE;
+ failure_mssg = "Unexpected chunked dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if (pass || (filespace != -1)) {
+ if (H5Sclose(filespace) < 0) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if (data_slice != NULL) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ /*
+ * Read an H5S_ALL filespace into a hyperslab defined memory space
+ */
+
+ if ((data_slice = (float *)HDmalloc((size_t)(dset_size * 2) * filetype_size)) == NULL) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+
+ /* setup memspace */
+ if (pass) {
+ dims[0] = (hsize_t)dset_size * 2;
+ if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+ }
+ }
+ if (pass) {
+ offset = (hsize_t)dset_size;
+ if ((H5Sselect_hyperslab(memspace, H5S_SELECT_SET, &offset, NULL, &offset, NULL)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed\n";
+ }
+ }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
- if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pset() failed\n";
+ if (pass) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
}
- }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* read this process's section of the data */
- if ( pass ) {
- if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
- H5S_ALL, dxpl_id, data_slice)) < 0 ) {
- pass = FALSE;
- failure_mssg = "H5Dread() failed\n";
+ /* read this process's section of the data */
+ if (pass) {
+ if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, H5S_ALL, dxpl_id, data_slice)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
}
- }
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if ( pass ) {
- prop_value = FALSE;
- if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
- failure_mssg = "H5Pget() failed\n";
- }
if (pass) {
- if(prop_value != TRUE) {
- pass = FALSE;
- failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
- }
+ prop_value = FALSE;
+ if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if (prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
}
- }
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
- /* verify the data */
- if ( pass ) {
-
- if ( comm == MPI_COMM_WORLD ) /* test 1 */
- nextValue = 0;
- else if ( group_id == 0 ) /* test 2 group 0 */
- nextValue = 0;
- else /* test 2 group 1 */
- nextValue = (float)((hsize_t)(mpi_size / 2)*count);
-
- i = (hsize_t)dset_size;
- while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
- /* what we really want is data_slice[i] != nextValue --
- * the following is a circumlocution to shut up
- * the compiler.
- */
- if ( ( data_slice[i] > nextValue ) ||
- ( data_slice[i] < nextValue ) ) {
- pass = FALSE;
- failure_mssg = "Unexpected dset contents.\n";
- }
- nextValue += 1;
- i++;
- }
- }
-
- if ( pass || (memspace != -1) ) {
- if ( H5Sclose(memspace) < 0 ) {
- pass = false;
- failure_mssg = "H5Sclose(memspace) failed.\n";
- }
- }
-
- /* free data_slice if it has been allocated */
- if ( data_slice != NULL ) {
- HDfree(data_slice);
- data_slice = NULL;
- }
+ /* verify the data */
+ if (pass) {
+
+ if (comm == MPI_COMM_WORLD) /* test 1 */
+ nextValue = 0;
+ else if (group_id == 0) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+ i = (hsize_t)dset_size;
+ while ((pass) && (i < (hsize_t)dset_size)) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
- if ( pass || (dxpl_id != -1) ) {
- if ( H5Pclose(dxpl_id) < 0 ) {
- pass = false;
- failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ if (pass || (memspace != -1)) {
+ if (H5Sclose(memspace) < 0) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if (data_slice != NULL) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ if (pass || (dxpl_id != -1)) {
+ if (H5Pclose(dxpl_id) < 0) {
+ pass = false;
+ failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ }
}
- }
}
/* close file, etc. */
- if ( pass || (dset_id != -1) ) {
- if ( H5Dclose(dset_id) < 0 ) {
- pass = false;
+ if (pass || (dset_id != -1)) {
+ if (H5Dclose(dset_id) < 0) {
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
- if ( pass || (dset_id_ch != -1) ) {
- if ( H5Dclose(dset_id_ch) < 0 ) {
- pass = false;
+ if (pass || (dset_id_ch != -1)) {
+ if (H5Dclose(dset_id_ch) < 0) {
+ pass = false;
failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
}
}
- if ( pass || (file_id != -1) ) {
- if ( H5Fclose(file_id) < 0 ) {
- pass = false;
+ if (pass || (file_id != -1)) {
+ if (H5Fclose(file_id) < 0) {
+ pass = false;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
- if ( pass || (fapl_id != -1) ) {
- if ( H5Pclose(fapl_id) < 0 ) {
- pass = false;
+ if (pass || (fapl_id != -1)) {
+ if (H5Pclose(fapl_id) < 0) {
+ pass = false;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
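
The H5S_ALL reads above go through a collective transfer property list; in instrumented builds the test also plants the internal H5D_XFER_COLL_RANK0_BCAST property to confirm that the rank-0 read-and-broadcast optimization fires for the contiguous dataset and stays off for the chunked one. Leaving that instrumentation aside, the collective read itself reduces to roughly the following sketch (helper name illustrative):

#include "hdf5.h"

/* Read an entire (small) dataset on every rank with collective I/O. */
static herr_t
read_whole_dataset_collective(hid_t dset_id, float *buf)
{
    herr_t ret     = -1;
    hid_t  dxpl_id = H5Pcreate(H5P_DATASET_XFER);

    if (dxpl_id >= 0 && H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0)
        ret = H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, buf);

    if (dxpl_id >= 0)
        H5Pclose(dxpl_id);
    return ret;
}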
@@ -1034,36 +1008,35 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* Only overwrite the failure message if no previous error
* has been detected
*/
- local_failure = ( pass ? 0 : 1 );
+ local_failure = (pass ? 0 : 1);
- if ( MPI_Allreduce( &local_failure, &global_failures, 1,
- MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
- if ( pass ) {
- pass = FALSE;
+ if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
+ if (pass) {
+ pass = FALSE;
failure_mssg = "MPI_Allreduce() failed.\n";
}
- } else if ( ( pass ) && ( global_failures > 0 ) ) {
- pass = FALSE;
+ }
+ else if ((pass) && (global_failures > 0)) {
+ pass = FALSE;
failure_mssg = "One or more processes report failure.\n";
}
/* report results and finish cleanup */
- if ( group_rank == 0 ) {
- if ( pass ) {
+ if (group_rank == 0) {
+ if (pass) {
PASSED();
- } else {
+ }
+ else {
H5_FAILED();
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
- fcn_name, failure_mssg);
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
}
HDremove(reloc_data_filename);
}
- return( ! pass );
+ return (!pass);
} /* test_parallel_read() */
-
/*-------------------------------------------------------------------------
* Function: main
*
@@ -1088,13 +1061,13 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
*/
int
-main( int argc, char **argv)
+main(int argc, char **argv)
{
- int nerrs = 0;
- int which_group = 0;
- int mpi_rank;
- int mpi_size;
- int split_size;
+ int nerrs = 0;
+ int which_group = 0;
+ int mpi_rank;
+ int mpi_size;
+ int split_size;
MPI_Comm group_comm = MPI_COMM_NULL;
/* I don't believe that argv[0] can ever be NULL.
@@ -1108,33 +1081,33 @@ main( int argc, char **argv)
*/
test_argv0 = HDstrdup(argv[0]);
- if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
- HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
- HDexit(EXIT_FAILURE);
+ if ((MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
+ HDexit(EXIT_FAILURE);
}
- if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
+ if ((MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
HDexit(EXIT_FAILURE);
}
- if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
+ if ((MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
HDexit(EXIT_FAILURE);
}
H5open();
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDfprintf(stdout, "========================================\n");
HDfprintf(stdout, "Collective file open optimization tests\n");
HDfprintf(stdout, " mpi_size = %d\n", mpi_size);
HDfprintf(stdout, "========================================\n");
}
- if ( mpi_size < 3 ) {
+ if (mpi_size < 3) {
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDprintf(" Need at least 3 processes. Exiting.\n");
}
@@ -1148,13 +1121,10 @@ main( int argc, char **argv)
* two HDF files which in turn will be opened in parallel and the
* contents verified in the second read test below.
*/
- split_size = mpi_size / 2;
+ split_size = mpi_size / 2;
which_group = (mpi_rank < split_size ? 0 : 1);
- if ( (MPI_Comm_split(MPI_COMM_WORLD,
- which_group,
- 0,
- &group_comm)) != MPI_SUCCESS) {
+ if ((MPI_Comm_split(MPI_COMM_WORLD, which_group, 0, &group_comm)) != MPI_SUCCESS) {
HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
HDexit(EXIT_FAILURE);
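
main() splits MPI_COMM_WORLD into a lower and an upper half so the second round of tests can drive two files from two independent communicators; every rank passes 0 as the key, which lets MPI_Comm_split keep the original rank order inside each half. A minimal sketch (helper name illustrative):

#include <mpi.h>

/* Split the world communicator into a "lower half" and an "upper half"
 * group communicator; *which_group receives 0 or 1. */
static MPI_Comm
split_world_in_half(int mpi_rank, int mpi_size, int *which_group)
{
    MPI_Comm group_comm = MPI_COMM_NULL;

    *which_group = (mpi_rank < (mpi_size / 2)) ? 0 : 1;
    if (MPI_Comm_split(MPI_COMM_WORLD, *which_group, 0 /* key */, &group_comm) != MPI_SUCCESS)
        return MPI_COMM_NULL;
    return group_comm;
}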
@@ -1163,37 +1133,37 @@ main( int argc, char **argv)
/* ------ Generate all files ------ */
/* We generate the file used for test 1 */
- nerrs += generate_test_file( MPI_COMM_WORLD, mpi_rank, which_group );
+ nerrs += generate_test_file(MPI_COMM_WORLD, mpi_rank, which_group);
- if ( nerrs > 0 ) {
- if ( mpi_rank == 0 ) {
+ if (nerrs > 0) {
+ if (mpi_rank == 0) {
HDprintf(" Test(1) file construction failed -- skipping tests.\n");
}
goto finish;
}
/* We generate the file used for test 2 */
- nerrs += generate_test_file( group_comm, mpi_rank, which_group );
+ nerrs += generate_test_file(group_comm, mpi_rank, which_group);
- if ( nerrs > 0 ) {
- if ( mpi_rank == 0 ) {
+ if (nerrs > 0) {
+ if (mpi_rank == 0) {
HDprintf(" Test(2) file construction failed -- skipping tests.\n");
}
goto finish;
}
/* Now read the generated test file (still using MPI_COMM_WORLD) */
- nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
+ nerrs += test_parallel_read(MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
- if ( nerrs > 0 ) {
- if ( mpi_rank == 0 ) {
+ if (nerrs > 0) {
+ if (mpi_rank == 0) {
HDprintf(" Parallel read test(1) failed -- skipping tests.\n");
}
goto finish;
}
/* Update the user on our progress so far. */
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDprintf(" Test 1 of 2 succeeded\n");
HDprintf(" -- Starting multi-group parallel read test.\n");
}
@@ -1201,21 +1171,20 @@ main( int argc, char **argv)
/* run the 2nd set of tests */
nerrs += test_parallel_read(group_comm, mpi_rank, mpi_size, which_group);
- if ( nerrs > 0 ) {
- if ( mpi_rank == 0 ) {
+ if (nerrs > 0) {
+ if (mpi_rank == 0) {
HDprintf(" Multi-group read test(2) failed\n");
}
goto finish;
}
- if ( mpi_rank == 0 ) {
+ if (mpi_rank == 0) {
HDprintf(" Test 2 of 2 succeeded\n");
}
finish:
- if ((group_comm != MPI_COMM_NULL) &&
- (MPI_Comm_free(&group_comm)) != MPI_SUCCESS) {
+ if ((group_comm != MPI_COMM_NULL) && (MPI_Comm_free(&group_comm)) != MPI_SUCCESS) {
HDfprintf(stderr, "MPI_Comm_free failed!\n");
}
@@ -1224,11 +1193,11 @@ finish:
*/
MPI_Barrier(MPI_COMM_WORLD);
- if ( mpi_rank == 0 ) { /* only process 0 reports */
+ if (mpi_rank == 0) { /* only process 0 reports */
const char *header = "Collective file open optimization tests";
HDfprintf(stdout, "===================================\n");
- if ( nerrs > 0 ) {
+ if (nerrs > 0) {
HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs);
}
else {
@@ -1246,6 +1215,6 @@ finish:
MPI_Finalize();
/* cannot just return (nerrs) because exit code is limited to 1byte */
- return((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS );
+ return ((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS);
} /* main() */
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index da6bbe0..21f0591 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -21,37 +21,34 @@
#include "testphdf5.h"
-int nerrors = 0; /* errors count */
+int nerrors = 0; /* errors count */
-const char *FILENAME[] = {
- "shutdown",
- NULL
-};
+const char *FILENAME[] = {"shutdown", NULL};
int
-main (int argc, char **argv)
+main(int argc, char **argv)
{
- hid_t file_id, dset_id, grp_id;
- hid_t fapl, sid, mem_dataspace;
- herr_t ret;
- char filename[1024];
- int mpi_size, mpi_rank, ndims;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hsize_t dims[RANK];
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- hsize_t i, j;
- DATATYPE *data_array = NULL, *dataptr; /* data buffer */
+ hid_t file_id, dset_id, grp_id;
+ hid_t fapl, sid, mem_dataspace;
+ herr_t ret;
+ char filename[1024];
+ int mpi_size, mpi_rank, ndims;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hsize_t dims[RANK];
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ hsize_t i, j;
+ DATATYPE *data_array = NULL, *dataptr; /* data buffer */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(MAINPROCESS)
- TESTING("proper shutdown of HDF5 library");
+ if (MAINPROCESS)
+ TESTING("proper shutdown of HDF5 library");
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
@@ -74,64 +71,62 @@ main (int argc, char **argv)
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == (hsize_t)(ROW_FACTOR*mpi_size), "Wrong dataset dimensions");
- VRFY(dims[1] == (hsize_t)(COL_FACTOR*mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
/* allocate memory for data buffer */
- data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slab of rows. */
- block[0] = dims[0]/(hsize_t)mpi_size;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* read data independently */
- ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "H5Dread succeeded");
dataptr = data_array;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataptr != mpi_rank+1) {
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataptr != mpi_rank + 1) {
HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)((hsize_t)i+start[0]), (unsigned long)((hsize_t)j+start[1]),
- mpi_rank+1, *(dataptr));
- nerrors ++;
+ (unsigned long)i, (unsigned long)j, (unsigned long)((hsize_t)i + start[0]),
+ (unsigned long)((hsize_t)j + start[1]), mpi_rank + 1, *(dataptr));
+ nerrors++;
}
dataptr++;
- }
+ }
}
MPI_Finalize();
HDremove(filename);
/* release data buffers */
- if(data_array)
+ if (data_array)
HDfree(data_array);
nerrors += GetTestNumErrs();
- if(MAINPROCESS) {
- if(0 == nerrors)
+ if (MAINPROCESS) {
+ if (0 == nerrors)
PASSED();
else
- H5_FAILED()
+ H5_FAILED()
}
- return (nerrors!=0);
+ return (nerrors != 0);
}
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index dde322d..e84a942 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -23,13 +23,13 @@ static int
test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
{
MPI_Request req[2];
- MPI_Status status;
- hid_t pl; /* Decoded property list */
- size_t buf_size = 0;
- void *sbuf = NULL;
- herr_t ret; /* Generic return value */
+ MPI_Status status;
+ hid_t pl; /* Decoded property list */
+ size_t buf_size = 0;
+ void * sbuf = NULL;
+ herr_t ret; /* Generic return value */
- if(mpi_rank == 0) {
+ if (mpi_rank == 0) {
int send_size = 0;
/* first call to encode returns only the size of the buffer needed */
@@ -48,13 +48,13 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
} /* end if */
- if(mpi_rank == recv_proc) {
- int recv_size;
+ if (mpi_rank == recv_proc) {
+ int recv_size;
void *rbuf;
MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
buf_size = (size_t)recv_size;
- rbuf = (uint8_t *)HDmalloc(buf_size);
+ rbuf = (uint8_t *)HDmalloc(buf_size);
MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
pl = H5Pdecode(rbuf);
@@ -65,89 +65,88 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
ret = H5Pclose(pl);
VRFY((ret >= 0), "H5Pclose succeeded");
- if(NULL != rbuf)
+ if (NULL != rbuf)
HDfree(rbuf);
} /* end if */
- if(0 == mpi_rank)
+ if (0 == mpi_rank)
MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
- if(NULL != sbuf)
+ if (NULL != sbuf)
HDfree(sbuf);
MPI_Barrier(MPI_COMM_WORLD);
- return(0);
+ return (0);
}
void
test_plist_ed(void)
{
- hid_t dcpl; /* dataset create prop. list */
- hid_t dapl; /* dataset access prop. list */
- hid_t dxpl; /* dataset transfer prop. list */
- hid_t gcpl; /* group create prop. list */
- hid_t lcpl; /* link create prop. list */
- hid_t lapl; /* link access prop. list */
- hid_t ocpypl; /* object copy prop. list */
- hid_t ocpl; /* object create prop. list */
- hid_t fapl; /* file access prop. list */
- hid_t fcpl; /* file create prop. list */
- hid_t strcpl; /* string create prop. list */
- hid_t acpl; /* attribute create prop. list */
+ hid_t dcpl; /* dataset create prop. list */
+ hid_t dapl; /* dataset access prop. list */
+ hid_t dxpl; /* dataset transfer prop. list */
+ hid_t gcpl; /* group create prop. list */
+ hid_t lcpl; /* link create prop. list */
+ hid_t lapl; /* link access prop. list */
+ hid_t ocpypl; /* object copy prop. list */
+ hid_t ocpl; /* object create prop. list */
+ hid_t fapl; /* file access prop. list */
+ hid_t fcpl; /* file create prop. list */
+ hid_t strcpl; /* string create prop. list */
+ hid_t acpl; /* attribute create prop. list */
int mpi_size, mpi_rank, recv_proc;
- hsize_t chunk_size = 16384; /* chunk size */
- double fill = 2.7f; /* Fill value */
- size_t nslots = 521*2;
- size_t nbytes = 1048576 * 10;
- double w0 = 0.5f;
- unsigned max_compact;
- unsigned min_dense;
- hsize_t max_size[1]; /*data space maximum size */
- const char* c_to_f = "x+32";
- H5AC_cache_config_t my_cache_config = {
- H5AC__CURR_CACHE_CONFIG_VERSION,
- TRUE,
- FALSE,
- FALSE,
- "temp",
- TRUE,
- FALSE,
- ( 2 * 2048 * 1024),
- 0.3f,
- (64 * 1024 * 1024),
- (4 * 1024 * 1024),
- 60000,
- H5C_incr__threshold,
- 0.8f,
- 3.0f,
- TRUE,
- (8 * 1024 * 1024),
- H5C_flash_incr__add_space,
- 2.0f,
- 0.25f,
- H5C_decr__age_out_with_threshold,
- 0.997f,
- 0.8f,
- TRUE,
- (3 * 1024 * 1024),
- 3,
- FALSE,
- 0.2f,
- (256 * 2048),
- H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
-
- herr_t ret; /* Generic return value */
-
- if(VERBOSE_MED)
- HDprintf("Encode/Decode DCPLs\n");
+ hsize_t chunk_size = 16384; /* chunk size */
+ double fill = 2.7f; /* Fill value */
+ size_t nslots = 521 * 2;
+ size_t nbytes = 1048576 * 10;
+ double w0 = 0.5f;
+ unsigned max_compact;
+ unsigned min_dense;
+ hsize_t max_size[1]; /*data space maximum size */
+ const char * c_to_f = "x+32";
+ H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ FALSE,
+ "temp",
+ TRUE,
+ FALSE,
+ (2 * 2048 * 1024),
+ 0.3f,
+ (64 * 1024 * 1024),
+ (4 * 1024 * 1024),
+ 60000,
+ H5C_incr__threshold,
+ 0.8f,
+ 3.0f,
+ TRUE,
+ (8 * 1024 * 1024),
+ H5C_flash_incr__add_space,
+ 2.0f,
+ 0.25f,
+ H5C_decr__age_out_with_threshold,
+ 0.997f,
+ 0.8f,
+ TRUE,
+ (3 * 1024 * 1024),
+ 3,
+ FALSE,
+ 0.2f,
+ (256 * 2048),
+ H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
+
+ herr_t ret; /* Generic return value */
+
+ if (VERBOSE_MED)
+ HDprintf("Encode/Decode DCPLs\n");
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- if(mpi_size == 1)
+ if (mpi_size == 1)
recv_proc = 0;
else
recv_proc = 1;
@@ -162,21 +161,17 @@ test_plist_ed(void)
VRFY((ret >= 0), "H5Pset_alloc_time succeeded");
ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
- VRFY((ret>=0), "set fill-value succeeded");
+ VRFY((ret >= 0), "set fill-value succeeded");
max_size[0] = 100;
- ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
- (hsize_t)(max_size[0] * sizeof(int)/4));
- VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
- (hsize_t)(max_size[0] * sizeof(int)/4));
- VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
- (hsize_t)(max_size[0] * sizeof(int)/4));
- VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
- (hsize_t)(max_size[0] * sizeof(int)/4));
- VRFY((ret>=0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
VRFY((ret >= 0), "test_encode_decode succeeded");
@@ -184,7 +179,6 @@ test_plist_ed(void)
ret = H5Pclose(dcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE DAPLS *****/
dapl = H5Pcreate(H5P_DATASET_ACCESS);
VRFY((dapl >= 0), "H5Pcreate succeeded");
@@ -198,7 +192,6 @@ test_plist_ed(void)
ret = H5Pclose(dapl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE OCPLS *****/
ocpl = H5Pcreate(H5P_OBJECT_CREATE);
VRFY((ocpl >= 0), "H5Pcreate succeeded");
@@ -218,7 +211,6 @@ test_plist_ed(void)
ret = H5Pclose(ocpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE DXPLS *****/
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate succeeded");
@@ -256,7 +248,6 @@ test_plist_ed(void)
ret = H5Pclose(dxpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE GCPLS *****/
gcpl = H5Pcreate(H5P_GROUP_CREATE);
VRFY((gcpl >= 0), "H5Pcreate succeeded");
@@ -283,12 +274,11 @@ test_plist_ed(void)
ret = H5Pclose(gcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE LCPLS *****/
lcpl = H5Pcreate(H5P_LINK_CREATE);
VRFY((lcpl >= 0), "H5Pcreate succeeded");
- ret= H5Pset_create_intermediate_group(lcpl, TRUE);
+ ret = H5Pset_create_intermediate_group(lcpl, TRUE);
VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
@@ -297,7 +287,6 @@ test_plist_ed(void)
ret = H5Pclose(lcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE LAPLS *****/
lapl = H5Pcreate(H5P_LINK_ACCESS);
VRFY((lapl >= 0), "H5Pcreate succeeded");
@@ -330,7 +319,6 @@ test_plist_ed(void)
ret = H5Pclose(lapl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE OCPYPLS *****/
ocpypl = H5Pcreate(H5P_OBJECT_COPY);
VRFY((ocpypl >= 0), "H5Pcreate succeeded");
@@ -350,7 +338,6 @@ test_plist_ed(void)
ret = H5Pclose(ocpypl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE FAPLS *****/
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -397,7 +384,6 @@ test_plist_ed(void)
ret = H5Pclose(fapl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE FCPLS *****/
fcpl = H5Pcreate(H5P_FILE_CREATE);
VRFY((fcpl >= 0), "H5Pcreate succeeded");
@@ -414,7 +400,7 @@ test_plist_ed(void)
ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");
- ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
+ ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");
ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
@@ -429,7 +415,6 @@ test_plist_ed(void)
ret = H5Pclose(fcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE STRCPLS *****/
strcpl = H5Pcreate(H5P_STRING_CREATE);
VRFY((strcpl >= 0), "H5Pcreate succeeded");
@@ -443,7 +428,6 @@ test_plist_ed(void)
ret = H5Pclose(strcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/******* ENCODE/DECODE ACPLS *****/
acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
VRFY((acpl >= 0), "H5Pcreate succeeded");
@@ -457,4 +441,3 @@ test_plist_ed(void)
ret = H5Pclose(acpl);
VRFY((ret >= 0), "H5Pclose succeeded");
}
-
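
Every property-list class in test_plist_ed() goes through the same round trip: H5Pencode() is called once with a NULL buffer to learn the required size, once more to fill a malloc'd buffer, the bytes travel to another rank with MPI_Isend/MPI_Recv, and H5Pdecode() rebuilds the list on the receiving rank for verification. Stripped of the MPI transport, the round trip looks roughly like the sketch below, written against the three-argument H5Pencode() of this era (newer branches version the call as H5Pencode1/H5Pencode2); the helper name is illustrative:

#include <stdlib.h>
#include "hdf5.h"

/* Encode a property list into a fresh buffer and decode it again;
 * returns the decoded list, or -1 on failure. */
static hid_t
plist_round_trip(hid_t orig_pl)
{
    size_t buf_size = 0;
    void * buf      = NULL;
    hid_t  copy     = -1;

    if (H5Pencode(orig_pl, NULL, &buf_size) < 0) /* 1st call: size only */
        return -1;
    if ((buf = malloc(buf_size)) == NULL)
        return -1;
    if (H5Pencode(orig_pl, buf, &buf_size) >= 0) /* 2nd call: real encode */
        copy = H5Pdecode(buf);

    free(buf);
    return copy; /* caller can verify with H5Pequal(orig_pl, copy) and must close it */
}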
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index 6a35fb2..5940ba8 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -25,36 +25,33 @@
#include "testphdf5.h"
-int nerrors = 0; /* errors count */
+int nerrors = 0; /* errors count */
-const char *FILENAME[] = {
- "shutdown",
- NULL
-};
+const char *FILENAME[] = {"shutdown", NULL};
int
-main (int argc, char **argv)
+main(int argc, char **argv)
{
- hid_t file_id, dset_id, grp_id;
- hid_t fapl, sid, mem_dataspace;
- hsize_t dims[RANK], i;
- herr_t ret;
- char filename[1024];
- int mpi_size, mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- hsize_t start[RANK];
- hsize_t count[RANK];
- hsize_t stride[RANK];
- hsize_t block[RANK];
- DATATYPE *data_array = NULL; /* data buffer */
+ hid_t file_id, dset_id, grp_id;
+ hid_t fapl, sid, mem_dataspace;
+ hsize_t dims[RANK], i;
+ herr_t ret;
+ char filename[1024];
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL; /* data buffer */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- if(MAINPROCESS)
- TESTING("proper shutdown of HDF5 library");
+ if (MAINPROCESS)
+ TESTING("proper shutdown of HDF5 library");
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
@@ -68,58 +65,57 @@ main (int argc, char **argv)
grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "H5Gcreate succeeded");
- dims[0] = (hsize_t)ROW_FACTOR*(hsize_t)mpi_size;
- dims[1] = (hsize_t)COL_FACTOR*(hsize_t)mpi_size;
- sid = H5Screate_simple (RANK, dims, NULL);
+ dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
+ dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate succeeded");
/* allocate memory for data buffer */
- data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slab of rows. */
- block[0] = dims[0]/(hsize_t)mpi_size;
- block[1] = dims[1];
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (hsize_t)mpi_rank*block[0];
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
/* put some trivial data in the data_array */
- for(i=0 ; i<dims[0]*dims[1]; i++)
+ for (i = 0; i < dims[0] * dims[1]; i++)
data_array[i] = mpi_rank + 1;
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* write data independently */
- ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid,
- H5P_DEFAULT, data_array);
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release data buffers */
- if(data_array)
+ if (data_array)
HDfree(data_array);
MPI_Finalize();
nerrors += GetTestNumErrs();
- if(MAINPROCESS) {
- if(0 == nerrors)
+ if (MAINPROCESS) {
+ if (0 == nerrors)
PASSED();
else
- H5_FAILED()
+ H5_FAILED()
}
- return (nerrors!=0);
+ return (nerrors != 0);
}
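
t_pshutdown.c and t_prestart.c share one decomposition: the 2-D dataset is cut into row slabs of dims[0]/mpi_size rows, rank r owns the slab starting at row r*block[0], and each rank writes (or later reads back) its slab independently with the value mpi_rank+1. A trimmed sketch of the write side, assuming the first dimension divides evenly by mpi_size (helper name illustrative):

#include "hdf5.h"

/* Write this rank's slab of rows (value = mpi_rank + 1) into a 2-D
 * integer dataset. */
static herr_t
write_row_slab(hid_t dset_id, hid_t file_space, const hsize_t dims[2],
               int mpi_rank, int mpi_size, int *buf)
{
    hsize_t block[2] = {dims[0] / (hsize_t)mpi_size, dims[1]};
    hsize_t start[2] = {(hsize_t)mpi_rank * block[0], 0};
    hsize_t count[2] = {1, 1};
    hsize_t i, n     = block[0] * block[1];
    hid_t   mem_space;
    herr_t  ret;

    for (i = 0; i < n; i++)
        buf[i] = mpi_rank + 1;

    /* stride == block, count == {1,1}: one block per rank, as in the tests */
    if (H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, block, count, block) < 0)
        return -1;
    if ((mem_space = H5Screate_simple(2, block, NULL)) < 0)
        return -1;

    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buf);
    H5Sclose(mem_space);
    return ret;
}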
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index c34ea8d..1c5e875 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -16,16 +16,14 @@
same shape by H5Sselect_shape_same().
*/
-#define H5S_FRIEND /*suppress error about including H5Spkg */
+#define H5S_FRIEND /*suppress error about including H5Spkg */
/* Define this macro to indicate that the testing APIs should be available */
#define H5S_TESTING
-
-#include "H5Spkg.h" /* Dataspaces */
+#include "H5Spkg.h" /* Dataspaces */
#include "testphdf5.h"
-
/* On Lustre (and perhaps other parallel file systems?), we have severe
* slow downs if two or more processes attempt to access the same file system
* block. To minimize this problem, we set alignment in the shape same tests
@@ -33,71 +31,69 @@
* the chunked dataset case.
*/
-#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
-
-
-#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
-
-struct hs_dr_pio_test_vars_t
-{
- int mpi_size;
- int mpi_rank;
- MPI_Comm mpi_comm;
- MPI_Info mpi_info;
- int test_num;
- int edge_size;
- int checker_edge_size;
- int chunk_edge_size;
- int small_rank;
- int large_rank;
- hid_t dset_type;
- uint32_t * small_ds_buf_0;
- uint32_t * small_ds_buf_1;
- uint32_t * small_ds_buf_2;
- uint32_t * small_ds_slice_buf;
- uint32_t * large_ds_buf_0;
- uint32_t * large_ds_buf_1;
- uint32_t * large_ds_buf_2;
- uint32_t * large_ds_slice_buf;
- int small_ds_offset;
- int large_ds_offset;
- hid_t fid; /* HDF5 file ID */
- hid_t xfer_plist;
- hid_t full_mem_small_ds_sid;
- hid_t full_file_small_ds_sid;
- hid_t mem_small_ds_sid;
- hid_t file_small_ds_sid_0;
- hid_t file_small_ds_sid_1;
- hid_t small_ds_slice_sid;
- hid_t full_mem_large_ds_sid;
- hid_t full_file_large_ds_sid;
- hid_t mem_large_ds_sid;
- hid_t file_large_ds_sid_0;
- hid_t file_large_ds_sid_1;
- hid_t file_large_ds_process_slice_sid;
- hid_t mem_large_ds_process_slice_sid;
- hid_t large_ds_slice_sid;
- hid_t small_dataset; /* Dataset ID */
- hid_t large_dataset; /* Dataset ID */
- size_t small_ds_size;
- size_t small_ds_slice_size;
- size_t large_ds_size;
- size_t large_ds_slice_size;
- hsize_t dims[PAR_SS_DR_MAX_RANK];
- hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- hsize_t * start_ptr;
- hsize_t * stride_ptr;
- hsize_t * count_ptr;
- hsize_t * block_ptr;
- int skips;
- int max_skips;
- int64_t total_tests;
- int64_t tests_run;
- int64_t tests_skipped;
+#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
+
+#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
+
+struct hs_dr_pio_test_vars_t {
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ int test_num;
+ int edge_size;
+ int checker_edge_size;
+ int chunk_edge_size;
+ int small_rank;
+ int large_rank;
+ hid_t dset_type;
+ uint32_t *small_ds_buf_0;
+ uint32_t *small_ds_buf_1;
+ uint32_t *small_ds_buf_2;
+ uint32_t *small_ds_slice_buf;
+ uint32_t *large_ds_buf_0;
+ uint32_t *large_ds_buf_1;
+ uint32_t *large_ds_buf_2;
+ uint32_t *large_ds_slice_buf;
+ int small_ds_offset;
+ int large_ds_offset;
+ hid_t fid; /* HDF5 file ID */
+ hid_t xfer_plist;
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid_0;
+ hid_t file_small_ds_sid_1;
+ hid_t small_ds_slice_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid_0;
+ hid_t file_large_ds_sid_1;
+ hid_t file_large_ds_process_slice_sid;
+ hid_t mem_large_ds_process_slice_sid;
+ hid_t large_ds_slice_sid;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+ size_t large_ds_slice_size;
+ hsize_t dims[PAR_SS_DR_MAX_RANK];
+ hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ hsize_t * start_ptr;
+ hsize_t * stride_ptr;
+ hsize_t * count_ptr;
+ hsize_t * block_ptr;
+ int skips;
+ int max_skips;
+ int64_t total_tests;
+ int64_t tests_run;
+ int64_t tests_skipped;
};
/*-------------------------------------------------------------------------
@@ -116,60 +112,53 @@ struct hs_dr_pio_test_vars_t
#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0
static void
-hs_dr_pio_test__setup(const int test_num,
- const int edge_size,
- const int checker_edge_size,
- const int chunk_edge_size,
- const int small_rank,
- const int large_rank,
- const hbool_t use_collective_io,
- const hid_t dset_type,
- const int express_test,
- struct hs_dr_pio_test_vars_t * tv_ptr)
+hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type, const int express_test,
+ struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
const char *fcnName = "hs_dr_pio_test__setup()";
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
const char *filename;
- hbool_t mis_match = FALSE;
- int i;
+ hbool_t mis_match = FALSE;
+ int i;
int mrc;
- int mpi_rank; /* needed by the VRFY macro */
+ int mpi_rank; /* needed by the VRFY macro */
uint32_t expected_value;
- uint32_t * ptr_0;
- uint32_t * ptr_1;
- hid_t acc_tpl; /* File access templates */
+ uint32_t * ptr_0;
+ uint32_t * ptr_1;
+ hid_t acc_tpl; /* File access templates */
hid_t small_ds_dcpl_id = H5P_DEFAULT;
hid_t large_ds_dcpl_id = H5P_DEFAULT;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
- HDassert( edge_size >= 6 );
- HDassert( edge_size >= chunk_edge_size );
- HDassert( ( chunk_edge_size == 0 ) || ( chunk_edge_size >= 3 ) );
- HDassert( 1 < small_rank );
- HDassert( small_rank < large_rank );
- HDassert( large_rank <= PAR_SS_DR_MAX_RANK );
+ HDassert(edge_size >= 6);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(1 < small_rank);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= PAR_SS_DR_MAX_RANK);
- tv_ptr->test_num = test_num;
- tv_ptr->edge_size = edge_size;
+ tv_ptr->test_num = test_num;
+ tv_ptr->edge_size = edge_size;
tv_ptr->checker_edge_size = checker_edge_size;
- tv_ptr->chunk_edge_size = chunk_edge_size;
- tv_ptr->small_rank = small_rank;
- tv_ptr->large_rank = large_rank;
- tv_ptr->dset_type = dset_type;
+ tv_ptr->chunk_edge_size = chunk_edge_size;
+ tv_ptr->small_rank = small_rank;
+ tv_ptr->large_rank = large_rank;
+ tv_ptr->dset_type = dset_type;
MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size));
MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank));
/* the VRFY() macro needs the local variable mpi_rank -- set it up now */
mpi_rank = tv_ptr->mpi_rank;
- HDassert( tv_ptr->mpi_size >= 1 );
+ HDassert(tv_ptr->mpi_size >= 1);
tv_ptr->mpi_comm = MPI_COMM_WORLD;
tv_ptr->mpi_info = MPI_INFO_NULL;
- for ( i = 0; i < tv_ptr->small_rank - 1; i++ )
- {
+ for (i = 0; i < tv_ptr->small_rank - 1; i++) {
tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size);
tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size);
}
@@ -178,10 +167,10 @@ hs_dr_pio_test__setup(const int test_num,
/* used by checker board tests only */
tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank;
- HDassert( 0 < tv_ptr->small_ds_offset );
- HDassert( tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK );
+ HDassert(0 < tv_ptr->small_ds_offset);
+ HDassert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK);
- for ( i = 0; i < tv_ptr->large_rank - 1; i++ ) {
+ for (i = 0; i < tv_ptr->large_rank - 1; i++) {
tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size);
tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size);
@@ -191,9 +180,8 @@ hs_dr_pio_test__setup(const int test_num,
/* used by checker board tests only */
tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank;
- HDassert( 0 <= tv_ptr->large_ds_offset );
- HDassert( tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK );
-
+ HDassert(0 <= tv_ptr->large_ds_offset);
+ HDassert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK);
/* set up the start, stride, count, and block pointers */
/* used by contiguous tests only */
@@ -202,7 +190,6 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
-
/* Allocate buffers */
tv_ptr->small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
@@ -213,8 +200,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
- tv_ptr->small_ds_slice_buf =
- (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+ tv_ptr->small_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
tv_ptr->large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
@@ -226,14 +212,13 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
- tv_ptr->large_ds_slice_buf =
- (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
+ tv_ptr->large_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
/* initialize the buffers */
ptr_0 = tv_ptr->small_ds_buf_0;
- for(i = 0; i < (int)(tv_ptr->small_ds_size); i++)
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++)
*ptr_0++ = (uint32_t)i;
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
HDmemset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
@@ -241,7 +226,7 @@ hs_dr_pio_test__setup(const int test_num,
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
ptr_0 = tv_ptr->large_ds_buf_0;
- for(i = 0; i < (int)(tv_ptr->large_ds_size); i++)
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++)
*ptr_0++ = (uint32_t)i;
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
HDmemset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
@@ -249,23 +234,19 @@ hs_dr_pio_test__setup(const int test_num,
HDmemset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
+ HDassert(filename != NULL);
#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
- HDfprintf(stdout,
- "%d: small/large rank = %d/%d, use_collective_io = %d.\n",
- tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank,
- (int)use_collective_io);
- HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n",
- tv_ptr->mpi_rank, tv_ptr->edge_size, tv_ptr->chunk_edge_size);
- HDfprintf(stdout, "%d: checker_edge_size = %d.\n",
- tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
- HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n",
- tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size),
- (int)(tv_ptr->large_ds_size));
+ HDfprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank,
+ tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io);
+ HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size,
+ tv_ptr->chunk_edge_size);
+ HDfprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
+ HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank,
+ (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size));
HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
}
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
@@ -280,7 +261,7 @@ hs_dr_pio_test__setup(const int test_num,
* the same file system block. Do this only if express_test is greater
* than zero.
*/
- if ( express_test > 0 ) {
+ if (express_test > 0) {
ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT);
VRFY((ret != FAIL), "H5Pset_alignment() succeeded");
@@ -296,89 +277,63 @@ hs_dr_pio_test__setup(const int test_num,
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
-
/* setup dims: */
tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
- tv_ptr->dims[1] = tv_ptr->dims[2] =
- tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
-
+ tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
/* Create small ds dataspaces */
- tv_ptr->full_mem_small_ds_sid =
- H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_small_ds_sid != 0),
- "H5Screate_simple() full_mem_small_ds_sid succeeded");
+ tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
- tv_ptr->full_file_small_ds_sid =
- H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_small_ds_sid != 0),
- "H5Screate_simple() full_file_small_ds_sid succeeded");
+ tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_small_ds_sid != 0),
- "H5Screate_simple() mem_small_ds_sid succeeded");
+ VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_0 != 0),
- "H5Screate_simple() file_small_ds_sid_0 succeeded");
+ VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_1 != 0),
- "H5Screate_simple() file_small_ds_sid_1 succeeded");
-
- tv_ptr->small_ds_slice_sid =
- H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->small_ds_slice_sid != 0),
- "H5Screate_simple() small_ds_slice_sid succeeded");
+ VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded");
+ tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded");
/* Create large ds dataspaces */
- tv_ptr->full_mem_large_ds_sid =
- H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_large_ds_sid != 0),
- "H5Screate_simple() full_mem_large_ds_sid succeeded");
+ tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
- tv_ptr->full_file_large_ds_sid =
- H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_large_ds_sid != FAIL),
- "H5Screate_simple() full_file_large_ds_sid succeeded");
+ tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded");
tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_sid != FAIL),
- "H5Screate_simple() mem_large_ds_sid succeeded");
+ VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded");
tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_0 != FAIL),
- "H5Screate_simple() file_large_ds_sid_0 succeeded");
+ VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_1 != FAIL),
- "H5Screate_simple() file_large_ds_sid_1 succeeded");
+ VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded");
- tv_ptr->mem_large_ds_process_slice_sid =
- H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
- tv_ptr->file_large_ds_process_slice_sid =
- H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() file_large_ds_process_slice_sid succeeded");
-
- tv_ptr->large_ds_slice_sid =
- H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->large_ds_slice_sid != 0),
- "H5Screate_simple() large_ds_slice_sid succeeded");
-
+ tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded");
/* if chunk edge size is greater than zero, set up the small and
* large data set creation property lists to specify chunked
* datasets.
*/
- if ( tv_ptr->chunk_edge_size > 0 ) {
+ if (tv_ptr->chunk_edge_size > 0) {
/* Under Lustre (and perhaps other parallel file systems?) we get
* locking delays when two or more processes attempt to access the
@@ -400,9 +355,8 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->chunk_dims[0] = 1;
- tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] =
- tv_ptr->chunk_dims[3] =
- tv_ptr->chunk_dims[4] = (hsize_t)(tv_ptr->chunk_edge_size);
+ tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] =
+ (hsize_t)(tv_ptr->chunk_edge_size);
small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
@@ -413,7 +367,6 @@ hs_dr_pio_test__setup(const int test_num,
ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims);
VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
-
large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
@@ -425,79 +378,61 @@ hs_dr_pio_test__setup(const int test_num,
}
/* create the small dataset */
- tv_ptr->small_dataset = H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type,
- tv_ptr->file_small_ds_sid_0, H5P_DEFAULT,
- small_ds_dcpl_id, H5P_DEFAULT);
+ tv_ptr->small_dataset =
+ H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded");
/* create the large dataset */
- tv_ptr->large_dataset = H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type,
- tv_ptr->file_large_ds_sid_0, H5P_DEFAULT,
- large_ds_dcpl_id, H5P_DEFAULT);
+ tv_ptr->large_dataset =
+ H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded");
-
/* setup xfer property list */
tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
- if(use_collective_io) {
+ if (use_collective_io) {
ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
/* setup selection to write initial data to the small and large data sets */
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
/* setup selections for writing initial data to the small data set */
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
- if ( MAINPROCESS ) { /* add an additional slice to the selections */
+ if (MAINPROCESS) { /* add an additional slice to the selections */
tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid,
- H5S_SELECT_OR,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0,
- H5S_SELECT_OR,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) suceeded");
- }
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) suceeded");
+ }
/* write the initial value of the small data set to file */
ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
@@ -505,167 +440,124 @@ hs_dr_pio_test__setup(const int test_num,
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
-
/* sync with the other processes before checking data */
- if ( ! use_collective_io ) {
+ if (!use_collective_io) {
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
}
/* read the small data set back to verify that it contains the
* expected data. Note that each process reads in the entire
* data set and verifies it.
*/
- ret = H5Dread(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->full_mem_small_ds_sid,
- tv_ptr->full_file_small_ds_sid,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_1);
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid,
+ tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
-
/* verify that the correct data was written to the small data set */
expected_value = 0;
- mis_match = FALSE;
- ptr_1 = tv_ptr->small_ds_buf_1;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_buf_1;
i = 0;
- for ( i = 0; i < (int)(tv_ptr->small_ds_size); i++ ) {
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
ptr_1++;
expected_value++;
}
- VRFY( (mis_match == FALSE), "small ds init data good.");
-
+ VRFY((mis_match == FALSE), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded");
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
/* In passing, setup the process slice dataspaces as well */
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret >= 0),
- "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret >= 0),
- "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
-
- if ( MAINPROCESS ) { /* add an additional slice to the selections */
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid,
- H5S_SELECT_OR,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
-
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0,
- H5S_SELECT_OR,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) suceeded");
- }
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) suceeded");
+ }
/* write the initial value of the large data set to file */
- ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type,
- tv_ptr->mem_large_ds_sid, tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
- if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
+ ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
-
/* sync with the other processes before checking data */
- if ( ! use_collective_io ) {
+ if (!use_collective_io) {
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
}
-
/* read the large data set back to verify that it contains the
* expected data. Note that each process reads in the entire
* data set.
*/
- ret = H5Dread(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->full_mem_large_ds_sid,
- tv_ptr->full_file_large_ds_sid,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_1);
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid,
+ tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
-
/* verify that the correct data was written to the large data set */
expected_value = 0;
- mis_match = FALSE;
- ptr_1 = tv_ptr->large_ds_buf_1;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
i = 0;
- for ( i = 0; i < (int)(tv_ptr->large_ds_size); i++ ) {
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
ptr_1++;
expected_value++;
}
- VRFY( (mis_match == FALSE), "large ds init data good.");
-
+ VRFY((mis_match == FALSE), "large ds init data good.");
/* sync with the other processes before changing data */
- if ( ! use_collective_io ) {
+ if (!use_collective_io) {
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync initial values check");
+ VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
}
return;
} /* hs_dr_pio_test__setup() */
-
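
For reference (an editorial sketch, not part of the patch hunks): the selection logic in hs_dr_pio_test__setup() reduces to one pattern -- each rank claims one slice of dimension 0 with a start/stride/count/block quadruple, and the main process ORs in the extra slice so that all mpi_size + 1 slices get written. The helper below illustrates that idiom under those assumptions; sid, ds_rank, mpi_rank, mpi_size and edge are hypothetical stand-ins, not identifiers from t_2Gio.c.

#include "hdf5.h"

/* Illustrative sketch only -- mirrors the per-rank selection idiom used above.
 * Assumes ds_rank <= 5 and that the selection fits the dataspace extent.     */
static herr_t
select_rank_slice(hid_t sid, int ds_rank, int mpi_rank, int mpi_size, int edge)
{
    hsize_t start[5], stride[5], count[5], block[5];
    herr_t  ret;
    int     i;

    start[0]  = (hsize_t)mpi_rank;             /* one dim-0 slice per rank */
    stride[0] = (hsize_t)(2 * (mpi_size + 1));
    count[0]  = 1;
    block[0]  = 1;
    for (i = 1; i < ds_rank; i++) {
        start[i]  = 0;                         /* full extent in the remaining dims */
        stride[i] = (hsize_t)(2 * edge);
        count[i]  = 1;
        block[i]  = (hsize_t)edge;
    }

    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    if (ret >= 0 && mpi_rank == 0) {           /* main process also claims the extra slice */
        start[0] = (hsize_t)mpi_size;
        ret      = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
    }

    return ret;
}
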
/*-------------------------------------------------------------------------
* Function: hs_dr_pio_test__takedown()
*
@@ -682,19 +574,19 @@ hs_dr_pio_test__setup(const int test_num,
#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0
static void
-hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
+hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
const char *fcnName = "hs_dr_pio_test__takedown()";
-#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
- int mpi_rank; /* needed by the VRFY macro */
- herr_t ret; /* Generic return value */
+#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
+ int mpi_rank; /* needed by the VRFY macro */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
/* Close property lists */
- if ( tv_ptr->xfer_plist != H5P_DEFAULT ) {
+ if (tv_ptr->xfer_plist != H5P_DEFAULT) {
ret = H5Pclose(tv_ptr->xfer_plist);
VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded");
}
@@ -756,21 +648,28 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
/* Free memory buffers */
- if ( tv_ptr->small_ds_buf_0 != NULL ) HDfree(tv_ptr->small_ds_buf_0);
- if ( tv_ptr->small_ds_buf_1 != NULL ) HDfree(tv_ptr->small_ds_buf_1);
- if ( tv_ptr->small_ds_buf_2 != NULL ) HDfree(tv_ptr->small_ds_buf_2);
- if ( tv_ptr->small_ds_slice_buf != NULL ) HDfree(tv_ptr->small_ds_slice_buf);
-
- if ( tv_ptr->large_ds_buf_0 != NULL ) HDfree(tv_ptr->large_ds_buf_0);
- if ( tv_ptr->large_ds_buf_1 != NULL ) HDfree(tv_ptr->large_ds_buf_1);
- if ( tv_ptr->large_ds_buf_2 != NULL ) HDfree(tv_ptr->large_ds_buf_2);
- if ( tv_ptr->large_ds_slice_buf != NULL ) HDfree(tv_ptr->large_ds_slice_buf);
+ if (tv_ptr->small_ds_buf_0 != NULL)
+ HDfree(tv_ptr->small_ds_buf_0);
+ if (tv_ptr->small_ds_buf_1 != NULL)
+ HDfree(tv_ptr->small_ds_buf_1);
+ if (tv_ptr->small_ds_buf_2 != NULL)
+ HDfree(tv_ptr->small_ds_buf_2);
+ if (tv_ptr->small_ds_slice_buf != NULL)
+ HDfree(tv_ptr->small_ds_slice_buf);
+
+ if (tv_ptr->large_ds_buf_0 != NULL)
+ HDfree(tv_ptr->large_ds_buf_0);
+ if (tv_ptr->large_ds_buf_1 != NULL)
+ HDfree(tv_ptr->large_ds_buf_1);
+ if (tv_ptr->large_ds_buf_2 != NULL)
+ HDfree(tv_ptr->large_ds_buf_2);
+ if (tv_ptr->large_ds_slice_buf != NULL)
+ HDfree(tv_ptr->large_ds_slice_buf);
return;
} /* hs_dr_pio_test__takedown() */
-
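
A side note on the setup/takedown pair (sketch only, not taken from the patch): the transfer property list is what distinguishes the collective and independent variants of these tests, and with independent I/O the tests fall back on explicit MPI_Barrier() calls before any rank verifies shared data. A minimal version of that property-list setup might look like the following; make_xfer_plist and use_collective_io are illustrative names rather than functions from this file.

#include "hdf5.h"

/* Illustrative sketch only: build a dataset transfer property list the way
 * the setup code above does.  Returns the dxpl id, or H5I_INVALID_HID.     */
static hid_t
make_xfer_plist(hbool_t use_collective_io)
{
    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    if (xfer_plist >= 0 && use_collective_io) {
        if (H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE) < 0) {
            H5Pclose(xfer_plist);
            return H5I_INVALID_HID;
        }
    }

    /* With independent I/O, callers follow each write with
     * MPI_Barrier(MPI_COMM_WORLD) before cross-checking data,
     * exactly as the tests above do. */
    return xfer_plist;
}
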
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test__d2m_l2s()
*
@@ -796,24 +695,23 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
static void
-contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
+contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
- uint32_t * ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* We have already done a H5Sselect_all() on the dataspace
* small_ds_slice_sid in the initialization phase, so no need to
* call H5Sselect_all() again.
@@ -822,16 +720,16 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -841,9 +739,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
- "%s reading slices from big cube on disk into small cube slice.\n",
- fcnName);
+ HDfprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
@@ -853,11 +749,11 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* mpi_rank, and don't itterate over it.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -868,21 +764,21 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -900,11 +796,11 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
- (tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -918,15 +814,9 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start_ptr,
- tv_ptr->stride_ptr,
- tv_ptr->count_ptr,
- tv_ptr->block_ptr);
- VRFY((ret != FAIL),
- "H5Sselect_hyperslab(file_large_cube_sid) succeeded");
-
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded");
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -934,42 +824,32 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
- (int)(tv_ptr->start[4]));
- HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
- fcnName,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- ret = H5Dread(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_slice_buf);
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-
/* verify that expected data is retrieved */
- mis_match = FALSE;
- ptr_1 = tv_ptr->small_ds_slice_buf;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_slice_buf;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
- for ( n = 0; n < tv_ptr->small_ds_slice_size; n++ ) {
+ for (n = 0; n < tv_ptr->small_ds_slice_size; n++) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
@@ -980,33 +860,25 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
expected_value++;
}
- VRFY((mis_match == FALSE),
- "small slice read from large ds data good.");
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
- (tv_ptr->tests_run)++;
+ (tv_ptr->tests_run)++;
}
l++;
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* contig_hs_dr_pio_test__d2m_l2s() */
-
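
The property this pass (and the three that follow) exercises is that H5Dread()/H5Dwrite() only require the memory and file selections to have the same shape, not the same rank, which the tests assert with H5Sselect_shape_same() before every transfer. A stripped-down sketch of that idea (not part of the patch), assuming a hypothetical already-open 4 x 10 x 10 uint32 dataset handle dset_id:

#include <stdint.h>
#include "hdf5.h"

/* Illustrative sketch only: read one 10x10 plane of a 4x10x10 dataset into a
 * rank-2 memory buffer.  The two selections differ in rank but not in shape. */
static herr_t
read_plane(hid_t dset_id, hsize_t plane, uint32_t plane_buf[10][10])
{
    hsize_t slice_dims[2] = {10, 10};
    hsize_t start[3]      = {plane, 0, 0};
    hsize_t count[3]      = {1, 10, 10};
    herr_t  ret           = -1;

    hid_t file_sid = H5Dget_space(dset_id);           /* assumed 4 x 10 x 10 */
    hid_t mem_sid  = H5Screate_simple(2, slice_dims, NULL);

    if (H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) >= 0 &&
        H5Sselect_shape_same(mem_sid, file_sid) > 0)  /* rank 2 vs rank 3, same shape */
        ret = H5Dread(dset_id, H5T_NATIVE_UINT32, mem_sid, file_sid, H5P_DEFAULT, plane_buf);

    H5Sclose(mem_sid);
    H5Sclose(file_sid);
    return ret;
}
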
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test__d2m_s2l()
*
@@ -1032,21 +904,21 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
static void
-contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
+contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -1056,32 +928,25 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* data (and only the correct data) is read.
*/
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
-
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
- "%s reading slices of on disk small data set into slices of big data set.\n",
- fcnName);
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
/* zero out the in memory large ds */
@@ -1090,22 +955,21 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
}
-
/* in serial versions of this test, we loop through all the dimensions
* of the large data set that don't appear in the small data set.
*
@@ -1115,12 +979,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* over it.
*/
-
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -1131,21 +994,21 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -1163,11 +1026,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -1181,15 +1044,9 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start_ptr,
- tv_ptr->stride_ptr,
- tv_ptr->count_ptr,
- tv_ptr->block_ptr);
- VRFY((ret != FAIL),
- "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
-
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -1197,57 +1054,46 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
- (int)(tv_ptr->start[4]));
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- ret = H5Dread(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_1);
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
/* verify that the expected data and only the
* expected data was read.
*/
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value = (uint32_t)
- ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
- start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index = (size_t)(
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
- HDassert( start_index < stop_index );
- HDassert( stop_index <= tv_ptr->large_ds_size );
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
- for ( n = 0; n < tv_ptr->large_ds_size; n++ ) {
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
- if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+ if ((n >= start_index) && (n <= stop_index)) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
expected_value++;
-                    } else {
-
-                        if ( *ptr_1 != 0 ) {
+                    }
+                    else {
+                        if (*ptr_1 != 0) {
mis_match = TRUE;
}
@@ -1258,8 +1104,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
- "small slice read from large ds data good.");
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -1268,23 +1113,16 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* contig_hs_dr_pio_test__d2m_s2l() */
-
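
The verification loops in this pass and its siblings all follow one idiom: after a read, the buffer must hold a running counter inside the window [start_index, stop_index] and untouched zeros everywhere else. Pulled out of the test for clarity (sketch only; window_is_good and its parameters are illustrative names):

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch only: the "expected window" check used after each read --
 * counter values inside the window, zeros outside it.  Returns 1 if the buffer
 * matches, 0 otherwise.                                                        */
static int
window_is_good(const uint32_t *buf, size_t buf_size, size_t start_index, size_t stop_index,
               uint32_t first_value)
{
    size_t   n;
    uint32_t expected_value = first_value;
    int      mis_match      = 0;

    for (n = 0; n < buf_size; n++) {
        if (n >= start_index && n <= stop_index) {
            if (buf[n] != expected_value)
                mis_match = 1;
            expected_value++;
        }
        else if (buf[n] != 0) {
            mis_match = 1;
        }
    }

    return !mis_match;
}
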
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test__m2d_l2s()
*
@@ -1312,26 +1150,25 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
static void
-contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
+contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
* H5Sselect_shape_same() views as being of the same shape.
@@ -1343,49 +1180,40 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* the memory and file selections.
*/
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
-
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -1394,11 +1222,8 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the in memory small ds */
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-
#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout,
- "%s writing slices from big ds to slices of small ds on disk.\n",
- fcnName);
+ HDfprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
@@ -1410,12 +1235,11 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* over it.
*/
-
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -1426,22 +1250,22 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
j = 0;
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -1459,11 +1283,11 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -1473,12 +1297,8 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
/* zero out this rank's slice of the on disk small data set */
- ret = H5Dwrite(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_2);
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
/* select the portion of the in memory large cube from which we
@@ -1490,15 +1310,9 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start_ptr,
- tv_ptr->stride_ptr,
- tv_ptr->count_ptr,
- tv_ptr->block_ptr);
- VRFY((ret >= 0),
- "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
-
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
/* verify that H5Sselect_shape_same() reports the in
* memory slice through the cube selection and the
@@ -1507,70 +1321,54 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed.");
-
/* write the slice from the in memory large data set to the
* slice of the on disk small dataset. */
#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
- (int)(tv_ptr->start[4]));
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- ret = H5Dwrite(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_0);
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
-
/* read the on disk square into memory */
- ret = H5Dread(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_1);
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
/* verify that expected data is retrieved */
mis_match = FALSE;
- ptr_1 = tv_ptr->small_ds_buf_1;
+ ptr_1 = tv_ptr->small_ds_buf_1;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
- HDassert( start_index < stop_index );
- HDassert( stop_index <= tv_ptr->small_ds_size );
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
- for ( n = 0; n < tv_ptr->small_ds_size; n++ ) {
+ for (n = 0; n < tv_ptr->small_ds_size; n++) {
- if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+ if ((n >= start_index) && (n <= stop_index)) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
expected_value++;
-                    } else {
-
-                        if ( *ptr_1 != 0 ) {
+                    }
+                    else {
+                        if (*ptr_1 != 0) {
mis_match = TRUE;
}
@@ -1581,33 +1379,25 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
- "small slice write from large ds data good.");
+ VRFY((mis_match == FALSE), "small slice write from large ds data good.");
(tv_ptr->tests_run)++;
}
l++;
- (tv_ptr->total_tests)++;
+ (tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* contig_hs_dr_pio_test__m2d_l2s() */
-
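
In the memory-to-disk direction the ordering matters: each iteration first zeroes this rank's target slice on disk, then writes the payload through the differently shaped memory selection, and only then reads the slice back for the window check sketched earlier. Schematically (a sketch, not part of the patch; every identifier below is a placeholder handle or buffer, not a name from t_2Gio.c):

#include <stdint.h>
#include "hdf5.h"

/* Illustrative sketch only: the zero / write / read-back cycle of the
 * memory-to-disk passes above.                                        */
static herr_t
zero_write_readback(hid_t dset, hid_t dxpl, hid_t zero_mem_sid, hid_t payload_mem_sid,
                    hid_t check_mem_sid, hid_t file_sid, const uint32_t *zero_buf,
                    const uint32_t *payload_buf, uint32_t *check_buf)
{
    herr_t ret;

    /* 1) clear this rank's slice on disk */
    ret = H5Dwrite(dset, H5T_NATIVE_UINT32, zero_mem_sid, file_sid, dxpl, zero_buf);
    if (ret < 0)
        return ret;

    /* 2) write the payload through a selection of different rank but same shape */
    ret = H5Dwrite(dset, H5T_NATIVE_UINT32, payload_mem_sid, file_sid, dxpl, payload_buf);
    if (ret < 0)
        return ret;

    /* 3) read the slice back for the expected-window check */
    return H5Dread(dset, H5T_NATIVE_UINT32, check_mem_sid, file_sid, dxpl, check_buf);
}
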
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test__m2d_s2l()
*
@@ -1637,21 +1427,21 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
static void
-contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
+contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -1667,42 +1457,37 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* select the slice of the in memory small data set associated with
* the process's mpi rank.
*/
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
-
/* set up start, stride, count, and block -- note that we will
* change start[] so as to write slices of the small data set to
* slices of the large data set.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -1712,16 +1497,14 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout,
- "%s writing process slices of small ds to slices of large ds on disk.\n",
- fcnName);
+ HDfprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -1732,21 +1515,21 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -1764,7 +1547,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
@@ -1775,18 +1558,15 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDfprintf(stdout,
- "%s:%d: skipping test with start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
- (int)(tv_ptr->start[4]));
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName,
+ (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -1799,15 +1579,11 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* Note that this will leave one slice with its original data
* as there is one more slice than processes.
*/
- ret = H5Dwrite(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->large_ds_slice_sid,
- tv_ptr->file_large_ds_process_slice_sid,
- tv_ptr->xfer_plist,
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_2);
VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded");
-
/* select the portion of the in memory large cube to which we
* are going to write data.
*/
@@ -1817,15 +1593,9 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start_ptr,
- tv_ptr->stride_ptr,
- tv_ptr->count_ptr,
- tv_ptr->block_ptr);
- VRFY((ret != FAIL),
- "H5Sselect_hyperslab() target large ds slice succeeded");
-
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded");
/* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
@@ -1835,78 +1605,59 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* write the small data set slice from memory to the
* target slice of the disk data set
*/
#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
- (int)(tv_ptr->start[4]));
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- ret = H5Dwrite(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
- "H5Dwrite of small ds slice to large ds succeeded");
-
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
/* read this processes slice on the on disk large
* data set into memory.
*/
- ret = H5Dread(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_process_slice_sid,
- tv_ptr->file_large_ds_process_slice_sid,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
- "H5Dread() of process slice of large ds succeeded");
-
+ ret = H5Dread(
+ tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
/* verify that the expected data and only the
* expected data was read.
*/
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value = (uint32_t)
- ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
-
- start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+
+ start_index = (size_t)(
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
- HDassert( start_index < stop_index );
- HDassert( stop_index < tv_ptr->large_ds_size );
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
- for ( n = 0; n < tv_ptr->large_ds_size; n++ ) {
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
- if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+ if ((n >= start_index) && (n <= stop_index)) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
expected_value++;
+ }
+ else {
- } else {
-
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
mis_match = TRUE;
}
@@ -1916,8 +1667,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
- "small ds slice write to large ds slice data good.");
+ VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -1926,23 +1676,16 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* contig_hs_dr_pio_test__m2d_s2l() */
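
Not part of the diff above: the m2d (memory-to-disk) small-to-large pass zeroes the target region, writes one small-dataset slice into the on-disk large dataset, reads the process slice back, and checks that only the elements between start_index and stop_index carry the expected counting values. Below is a minimal plain-C sketch of that bookkeeping, with invented sizes and a single slice index standing in for the (i, j, k, l) offsets.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int
main(void)
{
    const size_t edge_size  = 10;
    const size_t slice_size = edge_size * edge_size;             /* small_ds_slice_size    */
    const size_t large_size = edge_size * edge_size * edge_size; /* flattened "large" buf  */
    const size_t l          = 3;                                 /* slice index under test */
    uint32_t    *buf        = (uint32_t *)calloc(large_size, sizeof(uint32_t));
    uint32_t     expected   = 500;                               /* first expected value   */
    size_t       start_index, stop_index, n;
    int          mis_match  = 0;

    /* pretend the read-back above deposited the slice here */
    start_index = l * slice_size;
    stop_index  = start_index + slice_size - 1;
    for (n = start_index; n <= stop_index; n++)
        buf[n] = expected + (uint32_t)(n - start_index);

    /* the check: counting values inside [start_index, stop_index], zeros elsewhere */
    for (n = 0; n < large_size; n++) {
        if ((n >= start_index) && (n <= stop_index)) {
            if (buf[n] != expected)
                mis_match = 1;
            expected++;
        }
        else if (buf[n] != 0) {
            mis_match = 1;
        }
    }

    printf("%s\n", mis_match ? "data mismatch" : "small ds slice write to large ds slice data good");
    free(buf);
    return 0;
}
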
-
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test__run_test()
*
@@ -1959,25 +1702,15 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
static void
-contig_hs_dr_pio_test__run_test(const int test_num,
- const int edge_size,
- const int chunk_edge_size,
- const int small_rank,
- const int large_rank,
- const hbool_t use_collective_io,
- const hid_t dset_type,
- int express_test,
- int * skips_ptr,
- int max_skips,
- int64_t * total_tests_ptr,
- int64_t * tests_run_ptr,
- int64_t * tests_skipped_ptr)
+contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
+ const int small_rank, const int large_rank, const hbool_t use_collective_io,
+ const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- struct hs_dr_pio_test_vars_t test_vars =
- {
+ struct hs_dr_pio_test_vars_t test_vars = {
/* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
@@ -1999,7 +1732,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
/* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t fid = */ -1, /* HDF5 file ID */
/* hid_t xfer_plist = */ H5P_DEFAULT,
/* hid_t full_mem_small_ds_sid = */ -1,
/* hid_t full_file_small_ds_sid = */ -1,
@@ -2015,18 +1748,18 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* hid_t file_large_ds_process_slice_sid = */ -1,
/* hid_t mem_large_ds_process_slice_sid = */ -1,
/* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
/* size_t small_ds_size = */ 1,
/* size_t small_ds_slice_size = */ 1,
/* size_t large_ds_size = */ 1,
/* size_t large_ds_slice_size = */ 1,
- /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
/* hsize_t * start_ptr = */ NULL,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
@@ -2035,22 +1768,19 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
- /* int64_t tests_skipped = */ 0
- };
- struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars;
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
- hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size,
- small_rank, large_rank, use_collective_io,
+ hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io,
dset_type, express_test, tv_ptr);
/* initialize skips & max_skips */
- tv_ptr->skips = *skips_ptr;
+ tv_ptr->skips = *skips_ptr;
tv_ptr->max_skips = max_skips;
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
- HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n",
- test_num, small_rank, large_rank);
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
@@ -2065,26 +1795,24 @@ contig_hs_dr_pio_test__run_test(const int test_num,
*/
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
-
/* Second, read slices of the on disk small data set into slices
* through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__d2m_s2l(tv_ptr);
-
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
* H5Sselect_shape_same() views as being of the same shape.
@@ -2097,13 +1825,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
*/
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
-
/* Now write the contents of the process's slice of the in memory
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
@@ -2113,25 +1840,24 @@ contig_hs_dr_pio_test__run_test(const int test_num,
*/
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
- test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
- (long long)(tv_ptr->total_tests));
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
hs_dr_pio_test__takedown(tv_ptr);
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
@@ -2145,7 +1871,6 @@ contig_hs_dr_pio_test__run_test(const int test_num,
} /* contig_hs_dr_pio_test__run_test() */
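
Not part of the diff: contig_hs_dr_pio_test__run_test() receives a use_collective_io flag and forwards it to the setup code, which builds the transfer property list used by every H5Dread()/H5Dwrite() in the four sub-tests. The setup itself is outside this hunk; the following is only a sketch of the property-list choice it implies, for a parallel HDF5 build (everything except the HDF5/MPI calls is illustrative).

#include <mpi.h>
#include "hdf5.h"

int
main(int argc, char **argv)
{
    hid_t xfer_plist;
    int   use_collective_io = 1; /* the flag run_test() receives */

    MPI_Init(&argc, &argv);

    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    /* collective vs. independent MPI-IO for the subsequent H5Dread()/H5Dwrite() calls */
    H5Pset_dxpl_mpio(xfer_plist, use_collective_io ? H5FD_MPIO_COLLECTIVE : H5FD_MPIO_INDEPENDENT);

    /* ... the dataset transfers would use xfer_plist here ... */

    H5Pclose(xfer_plist);
    MPI_Finalize();
    return 0;
}
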
-
/*-------------------------------------------------------------------------
* Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
*
@@ -2164,28 +1889,28 @@ contig_hs_dr_pio_test__run_test(const int test_num,
static void
contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
{
- int express_test;
- int local_express_test;
- int mpi_rank = -1;
- int mpi_size;
- int test_num = 0;
- int edge_size;
- int chunk_edge_size = 0;
- int small_rank;
- int large_rank;
- int mpi_result;
- int skips = 0;
- int max_skips = 0;
+ int express_test;
+ int local_express_test;
+ int mpi_rank = -1;
+ int mpi_size;
+ int test_num = 0;
+ int edge_size;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int mpi_result;
+ int skips = 0;
+ int max_skips = 0;
/* The following table list the number of sub-tests skipped between
* each test that is actually executed as a function of the express
* test level. Note that any value in excess of 4880 will cause all
* sub tests to be skipped.
*/
- int max_skips_tbl[4] = {0, 4, 64, 1024};
- hid_t dset_type = H5T_NATIVE_UINT;
- int64_t total_tests = 0;
- int64_t tests_run = 0;
- int64_t tests_skipped = 0;
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
@@ -2196,45 +1921,33 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
local_express_test = GetTestExpress();
- mpi_result = MPI_Allreduce((void *)&local_express_test,
- (void *)&express_test,
- 1,
- MPI_INT,
- MPI_MAX,
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS ), "MPI_Allreduce(0) succeeded");
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
- if ( local_express_test < 0 ) {
+ if (local_express_test < 0) {
max_skips = max_skips_tbl[0];
- } else if ( local_express_test > 3 ) {
+ }
+ else if (local_express_test > 3) {
max_skips = max_skips_tbl[3];
- } else {
+ }
+ else {
max_skips = max_skips_tbl[local_express_test];
}
- for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
- for ( small_rank = 2; small_rank < large_rank; small_rank++ ) {
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
- switch(sstest_type){
+ switch (sstest_type) {
case IND_CONTIG:
/* contiguous data set, independent I/O */
chunk_edge_size = 0;
- contig_hs_dr_pio_test__run_test(test_num,
- edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- FALSE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
+ large_rank, FALSE, dset_type, express_test, &skips,
+ max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case IND_CONTIG */
@@ -2243,19 +1956,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
- contig_hs_dr_pio_test__run_test(test_num,
- edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- TRUE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
+ large_rank, TRUE, dset_type, express_test, &skips,
+ max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CONTIG */
@@ -2264,19 +1967,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, independent I/O */
chunk_edge_size = 5;
- contig_hs_dr_pio_test__run_test(test_num,
- edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- FALSE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
+ large_rank, FALSE, dset_type, express_test, &skips,
+ max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case IND_CHUNKED */
@@ -2285,19 +1978,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, collective I/O */
chunk_edge_size = 5;
- contig_hs_dr_pio_test__run_test(test_num,
- edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- TRUE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
+ large_rank, TRUE, dset_type, express_test, &skips,
+ max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CHUNKED */
@@ -2308,15 +1991,15 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
- if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
- HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n",
- tests_run, tests_skipped, total_tests);
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
+ HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
+ total_tests);
}
#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
}
}
- if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
tests_skipped, total_tests);
}
@@ -2325,7 +2008,6 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* contig_hs_dr_pio_test() */
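
Not part of the diff: the driver above reduces the per-rank express-test level with MPI_MAX so that every rank skips the same sub-tests, then maps the level to a skip count through max_skips_tbl[]; with the skips counter pattern used throughout, a value of N means one sub-test runs out of every N + 1. A small sketch of that agreement step (the test itself indexes the table with the local level; this sketch uses the agreed maximum for brevity):

#include <stdio.h>
#include <mpi.h>

int
main(int argc, char **argv)
{
    int local_level, level, mpi_rank;
    int max_skips_tbl[4] = {0, 4, 64, 1024};
    int max_skips;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    local_level = (mpi_rank == 0) ? 2 : 1; /* pretend the ranks disagree */
    MPI_Allreduce(&local_level, &level, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    /* clamp into the table the same way the driver does */
    if (level < 0)
        max_skips = max_skips_tbl[0];
    else if (level > 3)
        max_skips = max_skips_tbl[3];
    else
        max_skips = max_skips_tbl[level];

    if (mpi_rank == 0)
        printf("express level %d -> run 1 of every %d sub-tests\n", level, max_skips + 1);

    MPI_Finalize();
    return 0;
}
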
-
/****************************************************************
**
** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
@@ -2352,53 +2034,47 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
- const hid_t tgt_sid,
- const int tgt_rank,
- const int edge_size,
- const int checker_edge_size,
- const int sel_rank,
+ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const int edge_size, const int checker_edge_size, const int sel_rank,
hsize_t sel_start[])
{
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
#endif
- hbool_t first_selection = TRUE;
- int i, j, k, l, m;
- int n_cube_offset;
- int sel_offset;
- const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
- /* this changes */
- hsize_t base_count;
- hsize_t offset_count;
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- herr_t ret; /* Generic return value */
-
- HDassert( edge_size >= 6 );
- HDassert( 0 < checker_edge_size );
- HDassert( checker_edge_size <= edge_size );
- HDassert( 0 < sel_rank );
- HDassert( sel_rank <= tgt_rank );
- HDassert( tgt_rank <= test_max_rank );
- HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int n_cube_offset;
+ int sel_offset;
+ const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
sel_offset = test_max_rank - sel_rank;
- HDassert( sel_offset >= 0 );
+ HDassert(sel_offset >= 0);
n_cube_offset = test_max_rank - tgt_rank;
- HDassert( n_cube_offset >= 0 );
- HDassert( n_cube_offset <= sel_offset );
+ HDassert(n_cube_offset >= 0);
+ HDassert(n_cube_offset <= sel_offset);
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n",
- fcnName, mpi_rank, edge_size, checker_edge_size);
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
- fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
- fcnName, mpi_rank, tgt_rank, n_cube_offset);
+ HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size,
+ checker_edge_size);
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset);
#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* First, compute the base count (which assumes start == 0
@@ -2416,14 +2092,14 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
base_count = (hsize_t)(edge_size / (checker_edge_size * 2));
- if ( (edge_size % (checker_edge_size * 2)) > 0 ) {
+ if ((edge_size % (checker_edge_size * 2)) > 0) {
base_count++;
}
offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2));
- if ( ((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0 ) {
+ if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) {
offset_count++;
}
@@ -2433,217 +2109,187 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
* the checker board.
*/
i = 0;
- while ( i < n_cube_offset ) {
+ while (i < n_cube_offset) {
/* these values should never be used */
- start[i] = 0;
+ start[i] = 0;
stride[i] = 0;
- count[i] = 0;
- block[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
i++;
}
- while ( i < sel_offset ) {
+ while (i < sel_offset) {
- start[i] = sel_start[i];
+ start[i] = sel_start[i];
stride[i] = (hsize_t)(2 * edge_size);
- count[i] = 1;
- block[i] = 1;
+ count[i] = 1;
+ block[i] = 1;
i++;
}
- while ( i < test_max_rank ) {
+ while (i < test_max_rank) {
stride[i] = (hsize_t)(2 * checker_edge_size);
- block[i] = (hsize_t)checker_edge_size;
+ block[i] = (hsize_t)checker_edge_size;
i++;
}
i = 0;
do {
- if ( 0 >= sel_offset ) {
+ if (0 >= sel_offset) {
- if ( i == 0 ) {
+ if (i == 0) {
start[0] = 0;
count[0] = base_count;
-
- } else {
+ }
+ else {
start[0] = (hsize_t)checker_edge_size;
count[0] = offset_count;
-
}
}
j = 0;
do {
- if ( 1 >= sel_offset ) {
+ if (1 >= sel_offset) {
- if ( j == 0 ) {
+ if (j == 0) {
start[1] = 0;
count[1] = base_count;
-
- } else {
+ }
+ else {
start[1] = (hsize_t)checker_edge_size;
count[1] = offset_count;
-
}
}
k = 0;
do {
- if ( 2 >= sel_offset ) {
+ if (2 >= sel_offset) {
- if ( k == 0 ) {
+ if (k == 0) {
start[2] = 0;
count[2] = base_count;
-
- } else {
+ }
+ else {
start[2] = (hsize_t)checker_edge_size;
count[2] = offset_count;
-
}
}
l = 0;
do {
- if ( 3 >= sel_offset ) {
+ if (3 >= sel_offset) {
- if ( l == 0 ) {
+ if (l == 0) {
start[3] = 0;
count[3] = base_count;
-
- } else {
+ }
+ else {
start[3] = (hsize_t)checker_edge_size;
count[3] = offset_count;
-
}
}
m = 0;
do {
- if ( 4 >= sel_offset ) {
+ if (4 >= sel_offset) {
- if ( m == 0 ) {
+ if (m == 0) {
start[4] = 0;
count[4] = base_count;
-
- } else {
+ }
+ else {
start[4] = (hsize_t)checker_edge_size;
count[4] = offset_count;
-
}
}
- if ( ((i + j + k + l + m) % 2) == 0 ) {
+ if (((i + j + k + l + m) % 2) == 0) {
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
- fcnName, mpi_rank, (int)first_selection);
- HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
- fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
- HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
- fcnName, mpi_rank,
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j,
+ k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
- fcnName, mpi_rank, sel_rank);
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank);
#endif
- if ( first_selection ) {
+ if (first_selection) {
first_selection = FALSE;
- ret = H5Sselect_hyperslab
- (
- tgt_sid,
- H5S_SELECT_SET,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
- &(block[n_cube_offset])
- );
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
- } else {
-
- ret = H5Sselect_hyperslab
- (
- tgt_sid,
- H5S_SELECT_OR,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
- &(block[n_cube_offset])
- );
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
-
}
}
m++;
- } while ( ( m <= 1 ) &&
- ( 4 >= sel_offset ) );
+ } while ((m <= 1) && (4 >= sel_offset));
l++;
- } while ( ( l <= 1 ) &&
- ( 3 >= sel_offset ) );
+ } while ((l <= 1) && (3 >= sel_offset));
k++;
- } while ( ( k <= 1 ) &&
- ( 2 >= sel_offset ) );
+ } while ((k <= 1) && (2 >= sel_offset));
j++;
- } while ( ( j <= 1 ) &&
- ( 1 >= sel_offset ) );
-
+ } while ((j <= 1) && (1 >= sel_offset));
i++;
- } while ( ( i <= 1 ) &&
- ( 0 >= sel_offset ) );
+ } while ((i <= 1) && (0 >= sel_offset));
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
- fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* Clip the selection back to the dataspace proper. */
- for ( i = 0; i < test_max_rank; i++ ) {
+ for (i = 0; i < test_max_rank; i++) {
start[i] = 0;
stride[i] = (hsize_t)edge_size;
@@ -2651,14 +2297,13 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
block[i] = (hsize_t)edge_size;
}
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND,
- start, stride, count, block);
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
- fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
@@ -2666,7 +2311,6 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */
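
Not part of the diff: ckrbrd_hs_dr_pio_test__slct_ckrbrd() builds its checkerboard by OR-ing offset hyperslabs over the up-to-five loop dimensions and then AND-ing the result back against the dataspace extent. Below is a serial, 2-D sketch of the same SET / OR / AND sequence with made-up sizes (6 x 6 extent, 2 x 2 checkers), selecting the 20 even-parity cells.

#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hsize_t dims[2]   = {6, 6};
    hid_t   sid       = H5Screate_simple(2, dims, NULL);
    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {4, 4}; /* 2 * checker_edge_size */
    hsize_t count[2]  = {2, 2}; /* base_count            */
    hsize_t block[2]  = {2, 2}; /* checker_edge_size     */

    /* checkers whose block indices are both even: SET */
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    /* the remaining even-parity checker (both indices odd), offset by one checker edge: OR */
    start[0] = start[1] = 2;
    count[0] = count[1] = 1; /* offset_count */
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);

    /* clip the union back to the dataspace proper, as the test does with AND */
    start[0] = start[1] = 0;
    stride[0] = stride[1] = 6;
    count[0] = count[1] = 1;
    block[0] = block[1] = 6;
    H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);

    /* 20 of the 36 cells land in checkers for these sizes */
    printf("npoints = %lld\n", (long long)H5Sget_select_npoints(sid));

    H5Sclose(sid);
    return 0;
}
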
-
/****************************************************************
**
** ckrbrd_hs_dr_pio_test__verify_data():
@@ -2726,36 +2370,33 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
static hbool_t
-ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
- const int rank,
- const int edge_size,
- const int checker_edge_size,
- uint32_t first_expected_val,
+ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
+ const int checker_edge_size, uint32_t first_expected_val,
hbool_t buf_starts_in_checker)
{
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
- uint32_t expected_value;
- uint32_t * val_ptr;
- int i, j, k, l, m; /* to track position in n-cube */
- int v, w, x, y, z; /* to track position in checker */
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
const int test_max_rank = 5; /* code changes needed if this is increased */
- HDassert( buf_ptr != NULL );
- HDassert( 0 < rank );
- HDassert( rank <= test_max_rank );
- HDassert( edge_size >= 6 );
- HDassert( 0 < checker_edge_size );
- HDassert( checker_edge_size <= edge_size );
- HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- int mpi_rank;
+ int mpi_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
@@ -2767,121 +2408,109 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
}
#endif
- val_ptr = buf_ptr;
- expected_value = first_expected_val;
+val_ptr = buf_ptr;
+expected_value = first_expected_val;
- i = 0;
- v = 0;
- start_in_checker[0] = buf_starts_in_checker;
- do
- {
- if ( v >= checker_edge_size ) {
+i = 0;
+v = 0;
+start_in_checker[0] = buf_starts_in_checker;
+do {
+ if (v >= checker_edge_size) {
+
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ }
- start_in_checker[0] = ! start_in_checker[0];
- v = 0;
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if (w >= checker_edge_size) {
+
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
}
- j = 0;
- w = 0;
- start_in_checker[1] = start_in_checker[0];
- do
- {
- if ( w >= checker_edge_size ) {
-
- start_in_checker[1] = ! start_in_checker[1];
- w = 0;
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if (x >= checker_edge_size) {
+
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
}
- k = 0;
- x = 0;
- start_in_checker[2] = start_in_checker[1];
- do
- {
- if ( x >= checker_edge_size ) {
-
- start_in_checker[2] = ! start_in_checker[2];
- x = 0;
- }
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if (y >= checker_edge_size) {
- l = 0;
- y = 0;
- start_in_checker[3] = start_in_checker[2];
- do
- {
- if ( y >= checker_edge_size ) {
-
- start_in_checker[3] = ! start_in_checker[3];
- y = 0;
- }
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
+ }
- m = 0;
- z = 0;
+ m = 0;
+ z = 0;
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
#endif
- in_checker = start_in_checker[3];
- do
- {
+ in_checker = start_in_checker[3];
+ do {
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- HDfprintf(stdout, " %d", (int)(*val_ptr));
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
#endif
- if ( z >= checker_edge_size ) {
-
- in_checker = ! in_checker;
- z = 0;
- }
-
- if ( in_checker ) {
+ if (z >= checker_edge_size) {
- if ( *val_ptr != expected_value ) {
-
- good_data = FALSE;
- }
+ in_checker = !in_checker;
+ z = 0;
+ }
- /* zero out buffer for re-use */
- *val_ptr = 0;
+ if (in_checker) {
- } else if ( *val_ptr != 0 ) {
+ if (*val_ptr != expected_value) {
good_data = FALSE;
+ }
- /* zero out buffer for re-use */
- *val_ptr = 0;
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+ else if (*val_ptr != 0) {
- }
+ good_data = FALSE;
- val_ptr++;
- expected_value++;
- m++;
- z++;
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
- } while ( ( rank >= (test_max_rank - 4) ) &&
- ( m < edge_size ) );
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- HDfprintf(stdout, "\n");
+ HDfprintf(stdout, "\n");
#endif
- l++;
- y++;
- } while ( ( rank >= (test_max_rank - 3) ) &&
- ( l < edge_size ) );
- k++;
- x++;
- } while ( ( rank >= (test_max_rank - 2) ) &&
- ( k < edge_size ) );
- j++;
- w++;
- } while ( ( rank >= (test_max_rank - 1) ) &&
- ( j < edge_size ) );
- i++;
- v++;
- } while ( ( rank >= test_max_rank ) &&
- ( i < edge_size ) );
+ l++;
+ y++;
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+} while ((rank >= test_max_rank) && (i < edge_size));
- return(good_data);
+return (good_data);
} /* ckrbrd_hs_dr_pio_test__verify_data() */
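
Not part of the diff: the verification walk above toggles an in_checker flag every checker_edge_size elements in each dimension, while the expected value keeps counting across the gaps, so non-checker cells must read back as the zeros they were preloaded with. A 1-D sketch of that walk with invented values:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    enum { EDGE = 10, CHECKER = 2 };
    /* checkers at [0,1], [4,5], [8,9]; the expected value keeps counting across
     * the zero gaps, which is why the in-checker values jump 8 -> 11 -> 15.
     */
    uint32_t buf[EDGE]  = {7, 8, 0, 0, 11, 12, 0, 0, 15, 16};
    uint32_t expected   = 7; /* first_expected_val    */
    int      in_checker = 1; /* buf_starts_in_checker */
    int      good_data  = 1;
    int      m, z = 0;

    for (m = 0; m < EDGE; m++) {
        if (z >= CHECKER) { /* crossed a checker boundary: toggle */
            in_checker = !in_checker;
            z = 0;
        }
        if (in_checker) {
            if (buf[m] != expected)
                good_data = 0;
        }
        else if (buf[m] != 0) {
            good_data = 0;
        }
        expected++;
        z++;
    }

    printf("%s\n", good_data ? "good data" : "bad data");
    return 0;
}
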
-
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
*
@@ -2908,24 +2537,23 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
+ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
- uint32_t * ptr_0;
+ uint32_t * ptr_0;
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- uint32_t expected_value;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ uint32_t expected_value;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* first, verify that we can read from disk correctly using selections
* of different rank that H5Sselect_shape_same() views as being of the
* same shape.
@@ -2940,24 +2568,19 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank,
- tv_ptr->small_ds_slice_sid,
- tv_ptr->small_rank - 1,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
sel_start);
/* zero out the buffer we will be reading into */
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ",
- fcnName, tv_ptr->mpi_rank);
+ HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank);
ptr_0 = tv_ptr->small_ds_slice_buf;
- for ( i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++ ) {
+ for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) {
HDfprintf(stdout, "%d ", (int)(*ptr_0));
ptr_0++;
}
@@ -2967,25 +2590,24 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
}
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
- "%s:%d: reading slice from big ds on disk into small ds slice.\n",
- fcnName, tv_ptr->mpi_rank);
+ HDfprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName,
+ tv_ptr->mpi_rank);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
* of the large data set. However, in the parallel version, each
@@ -2994,11 +2616,11 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* mpi_rank, and don't itterate over it.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -3009,21 +2631,21 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -3041,11 +2663,11 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -3059,22 +2681,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd
- (
- tv_ptr->mpi_rank,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->large_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
- tv_ptr->start
- );
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -3082,52 +2697,37 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName,
- tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1],
- tv_ptr->start[2], tv_ptr->start[3], tv_ptr->start[4]);
- HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
- fcnName,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- ret = H5Dread(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_slice_buf);
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
- fcnName, tv_ptr->mpi_rank);
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* verify that expected data is retrieved */
- expected_value = (uint32_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
-
- data_ok = ckrbrd_hs_dr_pio_test__verify_data
- (
- tv_ptr->small_ds_slice_buf,
- tv_ptr->small_rank - 1,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- expected_value,
- (hbool_t)TRUE
- );
-
- VRFY((data_ok == TRUE),
- "small slice read from large ds data good.");
+ expected_value = (uint32_t)(
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ VRFY((data_ok == TRUE), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -3136,23 +2736,16 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */
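
Not part of the diff: the d2m (disk-to-memory) large-to-small pass relies on H5Sselect_shape_same() accepting selections of different rank, so a full lower-rank memory space and a one-thick slice of a higher-rank file space compare as the same shape and H5Dread() can move data between them directly. A serial sketch with hypothetical extents (no file I/O, just the dataspace comparison):

#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hsize_t mem_dims[2]  = {10, 10};
    hsize_t file_dims[3] = {4, 10, 10};
    hid_t   mem_sid      = H5Screate_simple(2, mem_dims, NULL);
    hid_t   file_sid     = H5Screate_simple(3, file_dims, NULL);
    hsize_t start[3]     = {2, 0, 0}; /* slice i == 2 of the 3-D space */
    hsize_t count[3]     = {1, 1, 1};
    hsize_t block[3]     = {1, 10, 10};
    htri_t  check;

    H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, block);

    /* the ranks differ (2 vs. 3) but both selections are 10 x 10 */
    check = H5Sselect_shape_same(mem_sid, file_sid);
    printf("shape same: %s\n", (check > 0) ? "TRUE" : "FALSE");

    H5Sclose(mem_sid);
    H5Sclose(file_sid);
    return 0;
}
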
-
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
*
@@ -3178,47 +2771,40 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
+ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* similarly, read slices of the on disk small data set into slices
* through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->small_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
sel_start);
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
- "%s reading slices of on disk small data set into slices of big data set.\n",
- fcnName);
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
/* zero out the buffer we will be reading into */
@@ -3229,16 +2815,16 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* into different slices of the process slice of the large data
* set.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -3253,12 +2839,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* over it.
*/
-
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -3269,21 +2854,21 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -3301,11 +2886,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -3319,23 +2904,15 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd
- (
- tv_ptr->mpi_rank,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->large_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
- tv_ptr->start
- );
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -3343,55 +2920,45 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
- tv_ptr->start[3], tv_ptr->start[4]);
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- ret = H5Dread(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_1);
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
/* verify that the expected data and only the
* expected data was read.
*/
- data_ok = TRUE;
- ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value =
- (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
- start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
+ data_ok = TRUE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index = (size_t)(
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
{
int m, n;
- HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
- fcnName, tv_ptr->mpi_rank, expected_value);
- HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank, start_index, stop_index);
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank,
+ expected_value);
+ HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ start_index, stop_index);
n = 0;
- for ( m = 0; (unsigned)m < tv_ptr->large_ds_size; m ++ ) {
+ for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) {
HDfprintf(stdout, "%d ", (int)(*ptr_1));
ptr_1++;
n++;
- if ( n >= tv_ptr->edge_size ) {
+ if (n >= tv_ptr->edge_size) {
HDfprintf(stdout, "\n");
n = 0;
}
@@ -3401,12 +2968,12 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- HDassert( start_index < stop_index );
- HDassert( stop_index <= tv_ptr->large_ds_size );
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
- for ( u = 0; u < start_index; u++ ) {
+ for (u = 0; u < start_index; u++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
}
@@ -3417,28 +2984,19 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
- "slice read from small to large ds data good(1).");
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(1).");
- data_ok = ckrbrd_hs_dr_pio_test__verify_data
- (
- ptr_1,
- tv_ptr->small_rank - 1,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- expected_value,
- (hbool_t)TRUE
- );
-
- VRFY((data_ok == TRUE),
- "slice read from small to large ds data good(2).");
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size,
+ expected_value, (hbool_t)TRUE);
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(2).");
ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
- for ( u = stop_index + 1; u < tv_ptr->large_ds_size; u++ ) {
+ for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
}
@@ -3449,8 +3007,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
- "slice read from small to large ds data good(3).");
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(3).");
(tv_ptr->tests_run)++;
}
@@ -3459,23 +3016,16 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */
-
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
*
@@ -3505,27 +3055,26 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
+ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
* H5Sselect_shape_same() views as being of the same shape.
@@ -3537,61 +3086,47 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* the memory and file selections.
*/
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
- ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
- ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
-
sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank,
- tv_ptr->file_small_ds_sid_1,
- tv_ptr->small_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
- sel_start);
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -3600,11 +3135,10 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the in memory small ds */
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
HDfprintf(stdout,
- "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
- fcnName);
+ "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
+ fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
@@ -3616,12 +3150,11 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* over it.
*/
-
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -3632,22 +3165,22 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
j = 0;
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -3665,11 +3198,11 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -3679,12 +3212,8 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
/* zero out this rank's slice of the on disk small data set */
- ret = H5Dwrite(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_2);
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
/* select the portion of the in memory large cube from which we
@@ -3696,23 +3225,15 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd
- (
- tv_ptr->mpi_rank,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->large_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
- tv_ptr->start
- );
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
/* verify that H5Sselect_shape_same() reports the in
* memory checkerboard selection of the slice through the
@@ -3722,90 +3243,67 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed.");
-
/* write the checker board selection of the slice from the in
* memory large data set to the slice of the on disk small
* dataset.
*/
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
- tv_ptr->start[3], tv_ptr->start[4]);
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1));
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- ret = H5Dwrite(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_small_ds_sid_1,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_0);
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
-
/* read the on disk process slice of the small dataset into memory */
- ret = H5Dread(tv_ptr->small_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_small_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_1);
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
/* verify that expected data is retrieved */
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
- HDassert( start_index < stop_index );
- HDassert( stop_index <= tv_ptr->small_ds_size );
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
data_ok = TRUE;
ptr_1 = tv_ptr->small_ds_buf_1;
- for ( u = 0; u < start_index; u++, ptr_1++ ) {
+ for (u = 0; u < start_index; u++, ptr_1++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
- *ptr_1 = 0;
+ *ptr_1 = 0;
}
}
- data_ok &= ckrbrd_hs_dr_pio_test__verify_data
- (
- tv_ptr->small_ds_buf_1 + start_index,
- tv_ptr->small_rank - 1,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- expected_value,
- (hbool_t)TRUE
- );
-
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
ptr_1 = tv_ptr->small_ds_buf_1;
- for ( u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++ ) {
+ for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
- *ptr_1 = 0;
+ *ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE),
- "large slice write slice to small slice data good.");
+ VRFY((data_ok == TRUE), "large slice write slice to small slice data good.");
(tv_ptr->tests_run)++;
}
@@ -3814,23 +3312,16 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */
-
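/* Annotation (not from the patch): a worked instance of the expected_value
 * polynomial used in the verification step of the function above, assuming
 * edge_size == 10 purely for illustration:
 *
 *     expected_value = (i * e*e*e*e) + (j * e*e*e) + (k * e*e) + (l * e)
 *
 *     (i, j, k, l) = (1, 2, 3, 4), e = 10
 *                  -> 10000 + 2000 + 300 + 40 = 12340
 *
 * i.e. the slice coordinates are encoded in powers of edge_size, which is in
 * effect the linear offset of the start of the selected slice within the
 * large data set.
 */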
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
*
@@ -3860,27 +3351,26 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
+ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- size_t u;
- size_t start_index;
- size_t stop_index;
- uint32_t expected_value;
- uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
- hsize_t sel_start[PAR_SS_DR_MAX_RANK];
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
-
/* Now write the contents of the process's slice of the in memory
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
@@ -3889,33 +3379,25 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* and file selections.
*/
- tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
- tv_ptr->count[0] = 1;
- tv_ptr->block[0] = 1;
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
- for ( i = 1; i < tv_ptr->large_rank; i++ ) {
+ for (i = 1; i < tv_ptr->large_rank; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
- ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
- ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid,
- H5S_SELECT_SET,
- tv_ptr->start,
- tv_ptr->stride,
- tv_ptr->count,
- tv_ptr->block);
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) suceeded");
/* setup a checkerboard selection of the slice of the in memory small
@@ -3923,30 +3405,26 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
- sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->small_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
sel_start);
/* set up start, stride, count, and block -- note that we will
* change start[] so as to write checkerboard selections of slices
* of the small data set to slices of the large data set.
*/
- for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
- tv_ptr->start[i] = 0;
+ tv_ptr->start[i] = 0;
tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
- tv_ptr->count[i] = 1;
- if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) {
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
tv_ptr->block[i] = 1;
-
- } else {
+ }
+ else {
tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
}
@@ -3957,15 +3435,16 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
HDfprintf(stdout,
- "%s writing process checkerboard selections of slices of small ds to process slices of large ds on disk.\n",
- fcnName);
+ "%s writing process checkerboard selections of slices of small ds to process slices of large "
+ "ds on disk.\n",
+ fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
i = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
i = 0;
}
@@ -3976,21 +3455,21 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* test.
*/
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
j = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
j = 0;
}
do {
- if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
k = tv_ptr->mpi_rank;
-
- } else {
+ }
+ else {
k = 0;
}
@@ -4008,11 +3487,11 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
l = 0;
do {
- if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
(tv_ptr->tests_skipped)++;
-
- } else { /* run the test */
+ }
+ else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
@@ -4025,15 +3504,10 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* Note that this will leave one slice with its original data
* as there is one more slice than processes.
*/
- ret = H5Dwrite(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_2);
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2);
VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded");
-
/* select the portion of the in memory large cube to which we
* are going to write data.
*/
@@ -4043,23 +3517,15 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
- HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
- ckrbrd_hs_dr_pio_test__slct_ckrbrd
- (
- tv_ptr->mpi_rank,
- tv_ptr->file_large_ds_sid_1,
- tv_ptr->large_rank,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- tv_ptr->small_rank - 1,
- tv_ptr->start
- );
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
/* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
@@ -4069,97 +3535,70 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
VRFY((check == TRUE), "H5Sselect_shape_same passed");
-
/* write the small data set slice from memory to the
* target slice of the disk data set
*/
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
- tv_ptr->start[3], tv_ptr->start[4]);
- HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
- fcnName, tv_ptr->mpi_rank,
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1));
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- ret = H5Dwrite(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_1,
- tv_ptr->xfer_plist,
- tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
- "H5Dwrite of small ds slice to large ds succeeded");
-
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
/* read this processes slice on the on disk large
* data set into memory.
*/
- ret = H5Dread(tv_ptr->large_dataset,
- H5T_NATIVE_UINT32,
- tv_ptr->mem_large_ds_sid,
- tv_ptr->file_large_ds_sid_0,
- tv_ptr->xfer_plist,
- tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
- "H5Dread() of process slice of large ds succeeded");
-
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
/* verify that the expected data and only the
* expected data was read.
*/
- expected_value =
- (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
-
- start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
- tv_ptr->edge_size * tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
- (k * tv_ptr->edge_size * tv_ptr->edge_size) +
- (l * tv_ptr->edge_size));
- stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
- HDassert( start_index < stop_index );
- HDassert( stop_index < tv_ptr->large_ds_size );
+ start_index = (size_t)(
+ (i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
data_ok = TRUE;
ptr_1 = tv_ptr->large_ds_buf_1;
- for ( u = 0; u < start_index; u++, ptr_1++ ) {
+ for (u = 0; u < start_index; u++, ptr_1++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
- *ptr_1 = 0;
+ *ptr_1 = 0;
}
}
- data_ok &= ckrbrd_hs_dr_pio_test__verify_data
- (
- tv_ptr->large_ds_buf_1 + start_index,
- tv_ptr->small_rank - 1,
- tv_ptr->edge_size,
- tv_ptr->checker_edge_size,
- expected_value,
- (hbool_t)TRUE
- );
-
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
ptr_1 = tv_ptr->large_ds_buf_1;
- for ( u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++ ) {
+ for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
- if ( *ptr_1 != 0 ) {
+ if (*ptr_1 != 0) {
data_ok = FALSE;
- *ptr_1 = 0;
+ *ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE),
- "small ds cb slice write to large ds slice data good.");
+ VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -4168,23 +3607,16 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->total_tests)++;
- } while ( ( tv_ptr->large_rank > 2 ) &&
- ( (tv_ptr->small_rank - 1) <= 1 ) &&
- ( l < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
k++;
- } while ( ( tv_ptr->large_rank > 3 ) &&
- ( (tv_ptr->small_rank - 1) <= 2 ) &&
- ( k < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
j++;
- } while ( ( tv_ptr->large_rank > 4 ) &&
- ( (tv_ptr->small_rank - 1) <= 3 ) &&
- ( j < tv_ptr->edge_size ) );
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
return;
} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */
-
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test__run_test()
*
@@ -4201,27 +3633,17 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
static void
-ckrbrd_hs_dr_pio_test__run_test(const int test_num,
- const int edge_size,
- const int checker_edge_size,
- const int chunk_edge_size,
- const int small_rank,
- const int large_rank,
- const hbool_t use_collective_io,
- const hid_t dset_type,
- const int express_test,
- int * skips_ptr,
- int max_skips,
- int64_t * total_tests_ptr,
- int64_t * tests_run_ptr,
- int64_t * tests_skipped_ptr)
+ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type,
+ const int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
{
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- struct hs_dr_pio_test_vars_t test_vars =
- {
+ struct hs_dr_pio_test_vars_t test_vars = {
/* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
@@ -4243,7 +3665,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
/* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t fid = */ -1, /* HDF5 file ID */
/* hid_t xfer_plist = */ H5P_DEFAULT,
/* hid_t full_mem_small_ds_sid = */ -1,
/* hid_t full_file_small_ds_sid = */ -1,
@@ -4259,18 +3681,18 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* hid_t file_large_ds_process_slice_sid = */ -1,
/* hid_t mem_large_ds_process_slice_sid = */ -1,
/* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
/* size_t small_ds_size = */ 1,
/* size_t small_ds_slice_size = */ 1,
/* size_t large_ds_size = */ 1,
/* size_t large_ds_slice_size = */ 1,
- /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
- /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0},
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
/* hsize_t * start_ptr = */ NULL,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
@@ -4279,30 +3701,23 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
- /* int64_t tests_skipped = */ 0
- };
- struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars;
-
- hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size,
- chunk_edge_size, small_rank, large_rank,
- use_collective_io, dset_type, express_test,
- tv_ptr);
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+ hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank,
+ use_collective_io, dset_type, express_test, tv_ptr);
/* initialize skips & max_skips */
- tv_ptr->skips = *skips_ptr;
+ tv_ptr->skips = *skips_ptr;
tv_ptr->max_skips = max_skips;
-
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
- HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n",
- test_num, small_rank, large_rank);
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
}
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
-
/* first, verify that we can read from disk correctly using selections
* of different rank that H5Sselect_shape_same() views as being of the
* same shape.
@@ -4318,7 +3733,6 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
-
/* similarly, read slices of the on disk small data set into slices
* through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
@@ -4326,7 +3740,6 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr);
-
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
* H5Sselect_shape_same() views as being of the same shape.
@@ -4340,7 +3753,6 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
-
/* Now write the contents of the process's slice of the in memory
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
@@ -4351,20 +3763,18 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr);
-
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
- test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
- (long long)(tv_ptr->total_tests));
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
}
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
hs_dr_pio_test__takedown(tv_ptr);
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
- if ( MAINPROCESS ) {
+ if (MAINPROCESS) {
HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
}
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
@@ -4378,7 +3788,6 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
} /* ckrbrd_hs_dr_pio_test__run_test() */
-
/*-------------------------------------------------------------------------
* Function: ckrbrd_hs_dr_pio_test()
*
@@ -4395,29 +3804,29 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
static void
ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
{
- int express_test;
- int local_express_test;
- int mpi_size = -1;
- int mpi_rank = -1;
- int test_num = 0;
- int edge_size;
- int checker_edge_size = 3;
- int chunk_edge_size = 0;
- int small_rank = 3;
- int large_rank = 4;
- int mpi_result;
- hid_t dset_type = H5T_NATIVE_UINT;
- int skips = 0;
- int max_skips = 0;
+ int express_test;
+ int local_express_test;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int test_num = 0;
+ int edge_size;
+ int checker_edge_size = 3;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int mpi_result;
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int skips = 0;
+ int max_skips = 0;
/* The following table list the number of sub-tests skipped between
* each test that is actually executed as a function of the express
* test level. Note that any value in excess of 4880 will cause all
* sub tests to be skipped.
*/
- int max_skips_tbl[4] = {0, 4, 64, 1024};
- int64_t total_tests = 0;
- int64_t tests_run = 0;
- int64_t tests_skipped = 0;
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -4428,20 +3837,18 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
- mpi_result = MPI_Allreduce((void *)&local_express_test,
- (void *)&express_test,
- 1,
- MPI_INT,
- MPI_MAX,
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
MPI_COMM_WORLD);
- VRFY((mpi_result == MPI_SUCCESS ), "MPI_Allreduce(0) succeeded");
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
- if ( local_express_test < 0 ) {
+ if (local_express_test < 0) {
max_skips = max_skips_tbl[0];
- } else if ( local_express_test > 3 ) {
+ }
+ else if (local_express_test > 3) {
max_skips = max_skips_tbl[3];
- } else {
+ }
+ else {
max_skips = max_skips_tbl[local_express_test];
}
@@ -4453,48 +3860,27 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
#endif
- for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
- for ( small_rank = 2; small_rank < large_rank; small_rank++ ) {
- switch(sstest_type){
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
+ switch (sstest_type) {
case IND_CONTIG:
/* contiguous data set, independent I/O */
chunk_edge_size = 0;
- ckrbrd_hs_dr_pio_test__run_test(test_num,
- edge_size,
- checker_edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- FALSE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
&tests_skipped);
test_num++;
break;
/* end of case IND_CONTIG */
- case COL_CONTIG:
+ case COL_CONTIG:
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
- ckrbrd_hs_dr_pio_test__run_test(test_num,
- edge_size,
- checker_edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- TRUE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ ckrbrd_hs_dr_pio_test__run_test(
+ test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE,
+ dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CONTIG */
@@ -4502,19 +3888,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
case IND_CHUNKED:
/* chunked data set, independent I/O */
chunk_edge_size = 5;
- ckrbrd_hs_dr_pio_test__run_test(test_num,
- edge_size,
- checker_edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- FALSE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
&tests_skipped);
test_num++;
break;
@@ -4523,20 +3899,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
case COL_CHUNKED:
/* chunked data set, collective I/O */
chunk_edge_size = 5;
- ckrbrd_hs_dr_pio_test__run_test(test_num,
- edge_size,
- checker_edge_size,
- chunk_edge_size,
- small_rank,
- large_rank,
- TRUE,
- dset_type,
- express_test,
- &skips,
- max_skips,
- &total_tests,
- &tests_run,
- &tests_skipped);
+ ckrbrd_hs_dr_pio_test__run_test(
+ test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE,
+ dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CHUNKED */
@@ -4547,7 +3912,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
- if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n",
tests_run, tests_skipped, total_tests);
}
@@ -4555,7 +3920,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
}
- if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
tests_skipped, total_tests);
}
@@ -4573,23 +3938,23 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
#include "testphdf5.h"
#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
/* global variables */
int dim0;
int dim1;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
-int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void * old_client_data; /* previous error handler arg.*/
/* other option flags */
@@ -4598,13 +3963,11 @@ void *old_client_data; /* previous error handler arg.*/
* created in one test is accessed by a different test.
* filenames[0] is reserved as the file name for PARATESTFILE.
*/
-#define NFILENAME 2
+#define NFILENAME 2
#define PARATESTFILE filenames[0]
-const char *FILENAME[NFILENAME]={
- "ShapeSameTest",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
+const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
@@ -4613,15 +3976,16 @@ hid_t fapl; /* file access property list */
#include <sys/types.h>
#include <sys/stat.h>
-void pause_proc(void)
+void
+pause_proc(void)
{
- int pid;
- h5_stat_t statbuf;
- char greenlight[] = "go";
- int maxloop = 10;
- int loops = 0;
- int time_int = 10;
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
/* mpi variables */
int mpi_size, mpi_rank;
@@ -4634,28 +3998,28 @@ void pause_proc(void)
MPI_Get_processor_name(mpi_name, &mpi_namelen);
if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
- if (!loops++){
- HDprintf("Proc %d (%*s, %d): to debug, attach %d\n",
- mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ fflush(stdout);
HDsleep(time_int);
- }
+ }
MPI_Barrier(MPI_COMM_WORLD);
}
/* Use the Profile feature of MPI to call the pause_proc() */
-int MPI_Init(int *argc, char ***argv)
+int
+MPI_Init(int *argc, char ***argv)
{
int ret_code;
- ret_code=PMPI_Init(argc, argv);
+ ret_code = PMPI_Init(argc, argv);
pause_proc();
return (ret_code);
}
-#endif /* USE_PAUSE */
-
+#endif /* USE_PAUSE */
/*
* Show command usage
@@ -4664,198 +4028,198 @@ static void
usage(void)
{
HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
HDprintf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
+ "\tset number of datasets for the multiple dataset test\n");
HDprintf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
+ "\tset number of groups for the multiple group test\n");
HDprintf("\t-f <prefix>\tfilename prefix\n");
HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
- HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- ROW_FACTOR, COL_FACTOR);
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
HDprintf("\n");
}
-
/*
* parse the command line options
*/
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup default chunk-size. Make sure sizes are > 0 */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
- while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: HDprintf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'f':
+ if (--argc < 1) {
+ nerrors++;
+ return (1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return (1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimensizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return (1);
+ }
}
- }
} /*while*/
/* check validity of dimension and chunk sizes */
- if (dim0 <= 0 || dim1 <= 0){
- HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return(1);
+ if (dim0 <= 0 || dim1 <= 0) {
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return (1);
}
- if (chunkdim0 <= 0 || chunkdim1 <= 0){
- HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return(1);
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return (1);
}
/* Make sure datasets can be divided into equal portions by the processes */
- if ((dim0 % mpi_size) || (dim1 % mpi_size)){
- if (MAINPROCESS)
- HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
- nerrors++;
- return(1);
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+ nerrors++;
+ return (1);
}
/* compose the test filenames */
{
- int i, n;
+ int i, n;
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
- HDprintf("h5_fixname failed\n");
- nerrors++;
- return(1);
- }
- HDprintf("Test filenames are:\n");
- for (i=0; i < n; i++)
- HDprintf(" %s\n", filenames[i]);
+ for (i = 0; i < n; i++)
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], sizeof(filenames[i])) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
}
- return(0);
+ return (0);
}
-
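/* Annotation (not from the patch): a worked example of the option arithmetic
 * in the reformatted parse_options() above.  Running with "-d 4 6" on 8 MPI
 * ranks (hypothetical values, chosen only for illustration) gives
 *
 *     dim0      = 4 * 8 = 32             dim1      = 6 * 8 = 48
 *     chunkdim0 = (32 + 9) / 10 = 4      chunkdim1 = (48 + 9) / 10 = 5
 *
 * i.e. the default chunk edge is one tenth of each dimension rounded up, so
 * it can never be zero, and both dims remain multiples of mpi_size as the
 * later validity check requires.
 */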
/*
* Create the appropriate File access property list
*/
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
- if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY((ret >= 0), "");
+ return (ret_pl);
}
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
}
/* unknown file access types */
return (ret_pl);
}
-
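/* Annotation (not from the patch): a minimal sketch of the usual pattern for
 * using the create_faccess_plist() helper just reformatted above -- build the
 * access list, create the file with it, then release the list.  example_open()
 * is a hypothetical wrapper added only for illustration; FACC_MPIO and the
 * HDF5 calls come from this file.
 */
static hid_t
example_open(const char *name)
{
    hid_t acc_pl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    hid_t fid    = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, acc_pl);

    /* the access property list is no longer needed once the file is open */
    H5Pclose(acc_pl);

    return fid; /* caller closes the file with H5Fclose(fid) */
}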
/* Shape Same test using contigous hyperslab using independent IO on contigous datasets */
static void
sscontig1(void)
@@ -4884,7 +4248,6 @@ sscontig4(void)
contig_hs_dr_pio_test(COL_CHUNKED);
}
-
/* Shape Same test using checker hyperslab using independent IO on contigous datasets */
static void
sschecker1(void)
@@ -4913,10 +4276,10 @@ sschecker4(void)
ckrbrd_hs_dr_pio_test(COL_CHUNKED);
}
-
-int main(int argc, char **argv)
+int
+main(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
#ifndef H5_HAVE_WIN32_API
/* Un-buffer the stdout and stderr */
@@ -4928,14 +4291,14 @@ int main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- dim0 = ROW_FACTOR*mpi_size;
- dim1 = COL_FACTOR*mpi_size;
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
- if (MAINPROCESS){
- HDprintf("===================================\n");
- HDprintf("Shape Same Tests Start\n");
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("Shape Same Tests Start\n");
HDprintf(" express_test = %d.\n", GetTestExpress());
- HDprintf("===================================\n");
+ HDprintf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
@@ -4943,8 +4306,8 @@ int main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0){
- HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ if (H5dont_atexit() < 0) {
+ HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
h5_show_hostname();
@@ -4953,42 +4316,33 @@ int main(int argc, char **argv)
TestInit(argv[0], usage, parse_options);
/* Shape Same tests using contigous hyperslab */
- AddTest("sscontig1", sscontig1, NULL,
- "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
- AddTest("sscontig2", sscontig2, NULL,
- "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
- AddTest("sscontig3", sscontig3, NULL,
- "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
- AddTest("sscontig4", sscontig4, NULL,
- "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
+ AddTest("sscontig1", sscontig1, NULL, "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
+ AddTest("sscontig2", sscontig2, NULL, "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
+ AddTest("sscontig3", sscontig3, NULL, "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
+ AddTest("sscontig4", sscontig4, NULL, "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
/* Shape Same tests using checker board hyperslab */
- AddTest("sschecker1", sschecker1, NULL,
- "Check hslab, ind IO, cntg dsets", PARATESTFILE);
- AddTest("sschecker2", sschecker2, NULL,
- "Check hslab, col IO, cntg dsets", PARATESTFILE);
- AddTest("sschecker3", sschecker3, NULL,
- "Check hslab, ind IO, chnk dsets", PARATESTFILE);
- AddTest("sschecker4", sschecker4, NULL,
- "Check hslab, col IO, chnk dsets", PARATESTFILE);
+ AddTest("sschecker1", sschecker1, NULL, "Check hslab, ind IO, cntg dsets", PARATESTFILE);
+ AddTest("sschecker2", sschecker2, NULL, "Check hslab, col IO, cntg dsets", PARATESTFILE);
+ AddTest("sschecker3", sschecker3, NULL, "Check hslab, ind IO, chnk dsets", PARATESTFILE);
+ AddTest("sschecker4", sschecker4, NULL, "Check hslab, col IO, chnk dsets", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
/* setup file access property list */
- fapl = H5Pcreate (H5P_FILE_ACCESS);
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* Parse command line arguments */
TestParseCmdLine(argc, argv);
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
- HDprintf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
}
-
/* Perform requested testing */
PerformTests();
@@ -5010,16 +4364,16 @@ int main(int argc, char **argv)
{
int temp;
MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors=temp;
+ nerrors = temp;
}
- if (MAINPROCESS){ /* only process 0 reports */
- HDprintf("===================================\n");
- if (nerrors)
- HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
- else
- HDprintf("Shape Same tests finished with no errors\n");
- HDprintf("===================================\n");
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("Shape Same tests finished with no errors\n");
+ HDprintf("===================================\n");
}
/* close HDF5 library */
@@ -5031,6 +4385,5 @@ int main(int argc, char **argv)
MPI_Finalize();
/* cannot just return (nerrors) because exit code is limited to 1byte */
- return(nerrors!=0);
+ return (nerrors != 0);
}
-
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 8d2b61c..cb5c01a 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -35,11 +35,9 @@
#include "H5private.h"
#include "testphdf5.h"
-
static void coll_write_test(int chunk_factor);
static void coll_read_test(void);
-
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_write
*
@@ -59,12 +57,9 @@ void
coll_irregular_cont_write(void)
{
- coll_write_test(0);
-
+ coll_write_test(0);
}
-
-
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_read
*
@@ -84,11 +79,9 @@ void
coll_irregular_cont_read(void)
{
- coll_read_test();
-
+ coll_read_test();
}
-
/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_write
*
@@ -108,12 +101,9 @@ void
coll_irregular_simple_chunk_write(void)
{
- coll_write_test(1);
-
+ coll_write_test(1);
}
-
-
/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_read
*
@@ -133,8 +123,7 @@ void
coll_irregular_simple_chunk_read(void)
{
- coll_read_test();
-
+ coll_read_test();
}
/*-------------------------------------------------------------------------
@@ -156,12 +145,9 @@ void
coll_irregular_complex_chunk_write(void)
{
- coll_write_test(4);
-
+ coll_write_test(4);
}
-
-
/*-------------------------------------------------------------------------
* Function: coll_irregular_complex_chunk_read
*
@@ -181,11 +167,9 @@ void
coll_irregular_complex_chunk_read(void)
{
- coll_read_test();
-
+ coll_read_test();
}
-
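/* Annotation (not from the patch): the chunk_factor argument passed by the
 * wrappers above selects the storage layout inside coll_write_test() below.
 * 0 skips H5Pset_chunk() entirely (contiguous datasets), while n > 0 sets
 * chunk_dims[i] = fsdim[i] / n.  So coll_write_test(1) uses a single chunk
 * the size of the whole dataset ("simple" chunking), and coll_write_test(4)
 * divides each dimension's chunk size by four, giving many smaller chunks
 * ("complex" chunking).
 */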
/*-------------------------------------------------------------------------
* Function: coll_write_test
*
@@ -202,447 +186,445 @@ coll_irregular_complex_chunk_read(void)
*
*-------------------------------------------------------------------------
*/
-void coll_write_test(int chunk_factor)
+void
+coll_write_test(int chunk_factor)
{
- const char *filename;
- hid_t facc_plist,dxfer_plist,dcrt_plist;
- hid_t file, datasetc,dataseti; /* File and dataset identifiers */
- hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */
-
- hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
- hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
- * read selection from the dataset on the disk
- */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
- hsize_t chunk_dims[2];
-
- herr_t ret;
- int i;
- int fillvalue = 0; /* Fill value for the dataset */
-
- int *matrix_out = NULL;
- int *matrix_out1 = NULL; /* Buffer to read from the dataset */
- int *vector = NULL;
-
- int mpi_size,mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- /*set up MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
-
- /* Obtain file name */
- filename = GetTestParameters();
-
- /*
- * Buffers' initialization.
- */
-
- mdim1[0] = (hsize_t)(MSPACE1_DIM*mpi_size);
- mdim[0] = MSPACE_DIM1;
- mdim[1] = (hsize_t)(MSPACE_DIM2*mpi_size);
- fsdim[0] = FSPACE_DIM1;
- fsdim[1] = (hsize_t)(FSPACE_DIM2*mpi_size);
-
- vector = (int*)HDmalloc(sizeof(int)*(size_t)mdim1[0]*(size_t)mpi_size);
- matrix_out = (int*)HDmalloc(sizeof(int)*(size_t)mdim[0]*(size_t)mdim[1]*(size_t)mpi_size);
- matrix_out1 = (int*)HDmalloc(sizeof(int)*(size_t)mdim[0]*(size_t)mdim[1]*(size_t)mpi_size);
-
- HDmemset(vector,0,sizeof(int)*(size_t)mdim1[0]*(size_t)mpi_size);
- vector[0] = vector[MSPACE1_DIM*mpi_size - 1] = -1;
- for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) H5_CHECKED_ASSIGN(vector[i], int, i, unsigned);
-
- /* Grab file access property list */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0),"");
-
- /*
- * Create a file.
- */
- file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
-
- /*
- * Create property list for a dataset and set up fill values.
- */
- dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcrt_plist >= 0),"");
-
- ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
- VRFY((ret >= 0),"Fill value creation property list succeeded");
-
- if(chunk_factor != 0) {
- chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
- chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
- ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
- }
-
- /*
- *
- * Create dataspace for the first dataset in the disk.
- * dim1 = 9
- * dim2 = 3600
- *
- *
- */
- fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
- VRFY((fspaceid >= 0),"file dataspace created succeeded");
-
- /*
- * Create dataset in the file. Notice that creation
- * property list dcrt_plist is used.
- */
- datasetc = H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
- VRFY((datasetc >= 0),"dataset created succeeded");
-
- dataseti = H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
- VRFY((dataseti >= 0),"dataset created succeeded");
-
- /* The First selection for FILE
- *
- * block (3,2)
- * stride(4,3)
- * count (1,768/mpi_size)
- * start (0,1+768*3*mpi_rank/mpi_size)
- *
- */
-
- start[0] = FHSTART0;
- start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
- stride[0] = FHSTRIDE0;
- stride[1] = FHSTRIDE1;
- count[0] = FHCOUNT0;
- count[1] = FHCOUNT1;
- block[0] = FHBLOCK0;
- block[1] = FHBLOCK1;
-
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* The Second selection for FILE
- *
- * block (3,768)
- * stride (1,1)
- * count (1,1)
- * start (4,768*mpi_rank/mpi_size)
- *
- */
-
- start[0] = SHSTART0;
- start[1] = (hsize_t)(SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank);
- stride[0] = SHSTRIDE0;
- stride[1] = SHSTRIDE1;
- count[0] = SHCOUNT0;
- count[1] = SHCOUNT1;
- block[0] = SHBLOCK0;
- block[1] = SHBLOCK1;
-
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /*
- * Create dataspace for the first dataset in the memory
- * dim1 = 27000
- *
- */
- mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
- VRFY((mspaceid1 >= 0),"memory dataspace created succeeded");
-
- /*
- * Memory space is 1-D, this is a good test to check
- * whether a span-tree derived datatype needs to be built.
- * block 1
- * stride 1
- * count 6912/mpi_size
- * start 1
- *
- */
- start[0] = MHSTART0;
- stride[0] = MHSTRIDE0;
- count[0] = MHCOUNT0;
- block[0] = MHBLOCK0;
-
- ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* independent write */
- ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
- VRFY((ret >= 0),"dataset independent write succeed");
-
- dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxfer_plist >= 0),"");
-
- ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0),"MPIO data transfer property list succeed");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
-
- /* collective write */
- ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
- VRFY((ret >= 0),"dataset collective write succeed");
-
- ret = H5Sclose(mspaceid1);
- VRFY((ret >= 0),"");
-
- ret = H5Sclose(fspaceid);
- VRFY((ret >= 0),"");
-
- /*
- * Close dataset.
- */
- ret = H5Dclose(datasetc);
- VRFY((ret >= 0),"");
-
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0),"");
-
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0),"");
- /*
- * Close property list
- */
-
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0),"");
- ret = H5Pclose(dxfer_plist);
- VRFY((ret >= 0),"");
- ret = H5Pclose(dcrt_plist);
- VRFY((ret >= 0),"");
-
- /*
- * Open the file.
- */
-
- /***
-
- For testing collective hyperslab selection write
- In this test, we are using independent read to check
- the correctedness of collective write compared with
- independent write,
-
- In order to throughly test this feature, we choose
- a different selection set for reading the data out.
-
-
- ***/
-
- /* Obtain file access property list with MPI-IO driver */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0),"");
-
- file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
- VRFY((file >= 0),"H5Fopen succeeded");
-
- /*
- * Open the dataset.
- */
- datasetc = H5Dopen2(file,"collect_write", H5P_DEFAULT);
- VRFY((datasetc >= 0),"H5Dopen2 succeeded");
-
- dataseti = H5Dopen2(file,"independ_write", H5P_DEFAULT);
- VRFY((dataseti >= 0),"H5Dopen2 succeeded");
-
- /*
- * Get dataspace of the open dataset.
- */
- fspaceid = H5Dget_space(datasetc);
- VRFY((fspaceid >= 0),"file dataspace obtained succeeded");
-
- fspaceid1 = H5Dget_space(dataseti);
- VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded");
-
-
- /* The First selection for FILE to read
- *
- * block (1,1)
- * stride(1.1)
- * count (3,768/mpi_size)
- * start (1,2+768*mpi_rank/mpi_size)
- *
- */
- start[0] = RFFHSTART0;
- start[1] = (hsize_t)(RFFHSTART1+mpi_rank*RFFHCOUNT1);
- block[0] = RFFHBLOCK0;
- block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
- stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1;
-
-
- /* The first selection of the dataset generated by collective write */
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* The first selection of the dataset generated by independent write */
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* The Second selection for FILE to read
- *
- * block (1,1)
- * stride(1.1)
- * count (3,1536/mpi_size)
- * start (2,4+1536*mpi_rank/mpi_size)
- *
- */
-
- start[0] = RFSHSTART0;
- start[1] = (hsize_t)(RFSHSTART1+RFSHCOUNT1*mpi_rank);
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
- stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1;
-
- /* The second selection of the dataset generated by collective write */
- ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* The second selection of the dataset generated by independent write */
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /*
- * Create memory dataspace.
- * rank = 2
- * mdim1 = 9
- * mdim2 = 3600
- *
- */
- mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
-
- /*
- * Select two hyperslabs in memory. Hyperslabs has the same
- * size and shape as the selected hyperslabs for the file dataspace
- * Only the starting point is different.
- * The first selection
- * block (1,1)
- * stride(1.1)
- * count (3,768/mpi_size)
- * start (0,768*mpi_rank/mpi_size)
- *
- */
-
-
- start[0] = RMFHSTART0;
- start[1] = (hsize_t)(RMFHSTART1+mpi_rank*RMFHCOUNT1);
- block[0] = RMFHBLOCK0;
- block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
- stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1;
-
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /*
- * Select two hyperslabs in memory. Hyperslabs has the same
- * size and shape as the selected hyperslabs for the file dataspace
- * Only the starting point is different.
- * The second selection
- * block (1,1)
- * stride(1,1)
- * count (3,1536/mpi_size)
- * start (1,2+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RMSHSTART0;
- start[1] = (hsize_t)(RMSHSTART1+mpi_rank*RMSHCOUNT1);
- block[0] = RMSHBLOCK0;
- block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
- stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1;
-
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /*
- * Initialize data buffer.
- */
-
- HDmemset(matrix_out,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
- HDmemset(matrix_out1,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
- /*
- * Read data back to the buffer matrix_out.
- */
-
- ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out);
- VRFY((ret >= 0),"H5D independent read succeed");
-
-
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0),"H5D independent read succeed");
-
- ret = 0;
-
- for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){
- if(matrix_out[i]!=matrix_out1[i]) ret = -1;
- if(ret < 0) break;
+ const char *filename;
+ hid_t facc_plist, dxfer_plist, dcrt_plist;
+ hid_t file, datasetc, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */
+
+ hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
+ hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ hsize_t chunk_dims[2];
+
+ herr_t ret;
+ int i;
+ int fillvalue = 0; /* Fill value for the dataset */
+
+ int *matrix_out = NULL;
+ int *matrix_out1 = NULL; /* Buffer to read from the dataset */
+ int *vector = NULL;
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = GetTestParameters();
+
+ /*
+ * Buffers' initialization.
+ */
+
+ mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size);
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ fsdim[0] = FSPACE_DIM1;
+ fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size);
+
+ vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+
+ HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1;
+ for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++)
+ H5_CHECKED_ASSIGN(vector[i], int, i, unsigned);
+
+ /* Grab file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Create a file.
+ */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ /*
+ * Create property list for a dataset and set up fill values.
+ */
+ dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcrt_plist >= 0), "");
+
+ ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
+ VRFY((ret >= 0), "Fill value creation property list succeeded");
+
+ if (chunk_factor != 0) {
+ chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
+ chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
+ ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
- VRFY((ret >= 0),"H5D irregular collective write succeed");
+ /*
+ *
+ * Create dataspace for the first dataset in the disk.
+ * dim1 = 9
+ * dim2 = 3600
+ *
+ *
+ */
+ fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
+ VRFY((fspaceid >= 0), "file dataspace created succeeded");
- /*
- * Close memory file and memory dataspaces.
- */
- ret = H5Sclose(mspaceid);
- VRFY((ret >= 0),"");
- ret = H5Sclose(fspaceid);
- VRFY((ret >= 0),"");
+ /*
+ * Create dataset in the file. Notice that creation
+ * property list dcrt_plist is used.
+ */
+ datasetc =
+ H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((datasetc >= 0), "dataset created succeeded");
- /*
- * Close dataset.
- */
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0),"");
+ dataseti =
+ H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((dataseti >= 0), "dataset created succeeded");
- ret = H5Dclose(datasetc);
- VRFY((ret >= 0),"");
+ /* The First selection for FILE
+ *
+ * block (3,2)
+ * stride(4,3)
+ * count (1,768/mpi_size)
+ * start (0,1+768*3*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = FHSTART0;
+ start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
+ stride[0] = FHSTRIDE0;
+ stride[1] = FHSTRIDE1;
+ count[0] = FHCOUNT0;
+ count[1] = FHCOUNT1;
+ block[0] = FHBLOCK0;
+ block[1] = FHBLOCK1;
- /*
- * Close property list
- */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0),"");
+ /* The Second selection for FILE
+ *
+ * block (3,768)
+ * stride (1,1)
+ * count (1,1)
+ * start (4,768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = SHSTART0;
+ start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank);
+ stride[0] = SHSTRIDE0;
+ stride[1] = SHSTRIDE1;
+ count[0] = SHCOUNT0;
+ count[1] = SHCOUNT1;
+ block[0] = SHBLOCK0;
+ block[1] = SHBLOCK1;
+
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create dataspace for the first dataset in the memory
+ * dim1 = 27000
+ *
+ */
+ mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
+ VRFY((mspaceid1 >= 0), "memory dataspace created succeeded");
+
+ /*
+ * Memory space is 1-D, this is a good test to check
+ * whether a span-tree derived datatype needs to be built.
+ * block 1
+ * stride 1
+ * count 6912/mpi_size
+ * start 1
+ *
+ */
+ start[0] = MHSTART0;
+ stride[0] = MHSTRIDE0;
+ count[0] = MHCOUNT0;
+ block[0] = MHBLOCK0;
+
+ ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* independent write */
+ ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
+ VRFY((ret >= 0), "dataset independent write succeed");
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* collective write */
+ ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
+ VRFY((ret >= 0), "dataset collective write succeed");
+
+ ret = H5Sclose(mspaceid1);
+ VRFY((ret >= 0), "");
+
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dcrt_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Open the file.
+ */
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0),"");
+ /***
- if (vector)
- HDfree(vector);
- if (matrix_out)
- HDfree(matrix_out);
- if (matrix_out1)
- HDfree(matrix_out1);
+ For testing collective hyperslab selection write
+ In this test, we are using independent read to check
+       the correctness of collective write compared with
+       independent write.
- return ;
+       In order to thoroughly test this feature, we choose
+ a different selection set for reading the data out.
+
+
+ ***/
+
+ /* Obtain file access property list with MPI-IO driver */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT);
+ VRFY((datasetc >= 0), "H5Dopen2 succeeded");
+
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid = H5Dget_space(datasetc);
+ VRFY((fspaceid >= 0), "file dataspace obtained succeeded");
+
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+     * stride(1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ /* The first selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The first selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+     * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE0;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ /* The second selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The second selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ * rank = 2
+ * mdim1 = 9
+ * mdim2 = 3600
+ *
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+     * Select two hyperslabs in memory. Hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace
+ * Only the starting point is different.
+ * The first selection
+ * block (1,1)
+     * stride(1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+     * Select two hyperslabs in memory. Hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace
+ * Only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+
+ VRFY((ret >= 0), "H5D irregular collective write succeed");
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ if (vector)
+ HDfree(vector);
+ if (matrix_out)
+ HDfree(matrix_out);
+ if (matrix_out1)
+ HDfree(matrix_out1);
+
+ return;
}
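
The heart of coll_write_test() is the dataset transfer property list: the same H5Dwrite() call runs independently or collectively depending on what that list says. A minimal sketch of the pattern follows; write_collectively, dset, mem_space, file_space and buf are placeholder names, not taken from the test.

    #include "hdf5.h"

    /* Write buf through a collective MPI-IO transfer.  dset, mem_space and
     * file_space are assumed to be valid identifiers (placeholder names). */
    static herr_t
    write_collectively(hid_t dset, hid_t mem_space, hid_t file_space, const int *buf)
    {
        hid_t  dxpl = H5Pcreate(H5P_DATASET_XFER); /* transfer property list */
        herr_t status;

        /* request collective MPI-IO for this transfer */
        status = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        /* the DXFER_INDEPENDENT_IO branch of the test additionally downgrades
         * the low-level I/O while keeping the collective call path:
         *     H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); */

        if (status >= 0)
            status = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

        H5Pclose(dxpl);
        return status;
    }

Passing H5P_DEFAULT instead of dxpl gives the independent write that the test compares against.
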
/*-------------------------------------------------------------------------
@@ -665,243 +647,235 @@ static void
coll_read_test(void)
{
- const char *filename;
- hid_t facc_plist,dxfer_plist;
- hid_t file, dataseti; /* File and dataset identifiers */
- hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
-
-
- /* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
- * read selection from the dataset on the disk
- */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
- herr_t ret;
-
- int i;
-
- int *matrix_out;
- int *matrix_out1; /* Buffer to read from the dataset */
-
- int mpi_size,mpi_rank;
-
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- /*set up MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
-
-
- /* Obtain file name */
- filename = GetTestParameters();
-
-
- /* Initialize the buffer */
-
- mdim[0] = MSPACE_DIM1;
- mdim[1] = (hsize_t)(MSPACE_DIM2*mpi_size);
- matrix_out =(int*)HDmalloc(sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
- matrix_out1=(int*)HDmalloc(sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
-
- /*** For testing collective hyperslab selection read ***/
-
- /* Obtain file access property list */
- facc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((facc_plist >= 0),"");
-
- /*
- * Open the file.
- */
- file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
- VRFY((file >= 0),"H5Fopen succeeded");
-
- /*
- * Open the dataset.
- */
- dataseti = H5Dopen2(file,"independ_write", H5P_DEFAULT);
- VRFY((dataseti >= 0),"H5Dopen2 succeeded");
-
- /*
- * Get dataspace of the open dataset.
- */
- fspaceid1 = H5Dget_space(dataseti);
- VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded");
-
- /* The First selection for FILE to read
- *
- * block (1,1)
- * stride(1.1)
- * count (3,768/mpi_size)
- * start (1,2+768*mpi_rank/mpi_size)
- *
- */
- start[0] = RFFHSTART0;
- start[1] = (hsize_t)(RFFHSTART1+mpi_rank*RFFHCOUNT1);
- block[0] = RFFHBLOCK0;
- block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
- stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1;
-
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /* The Second selection for FILE to read
- *
- * block (1,1)
- * stride(1.1)
- * count (3,1536/mpi_size)
- * start (2,4+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RFSHSTART0;
- start[1] = (hsize_t)(RFSHSTART1+RFSHCOUNT1*mpi_rank);
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
- stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1;
-
- ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
-
- /*
- * Create memory dataspace.
- */
- mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
-
- /*
- * Select two hyperslabs in memory. Hyperslabs has the same
- * size and shape as the selected hyperslabs for the file dataspace.
- * Only the starting point is different.
- * The first selection
- * block (1,1)
- * stride(1.1)
- * count (3,768/mpi_size)
- * start (0,768*mpi_rank/mpi_size)
- *
- */
-
- start[0] = RMFHSTART0;
- start[1] = (hsize_t)(RMFHSTART1+mpi_rank*RMFHCOUNT1);
- block[0] = RMFHBLOCK0;
- block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
- stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1;
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
- /*
- * Select two hyperslabs in memory. Hyperslabs has the same
- * size and shape as the selected hyperslabs for the file dataspace
- * Only the starting point is different.
- * The second selection
- * block (1,1)
- * stride(1,1)
- * count (3,1536/mpi_size)
- * start (1,2+1536*mpi_rank/mpi_size)
- *
- */
- start[0] = RMSHSTART0;
- start[1] = (hsize_t)(RMSHSTART1+mpi_rank*RMSHCOUNT1);
- block[0] = RMSHBLOCK0;
- block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
- stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1;
- ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
-
-
- /*
- * Initialize data buffer.
- */
-
- HDmemset(matrix_out,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
- HDmemset(matrix_out1,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
-
- /*
- * Read data back to the buffer matrix_out.
- */
-
- dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxfer_plist >= 0),"");
-
- ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0),"MPIO data transfer property list succeed");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
-
- /* Collective read */
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- dxfer_plist, matrix_out);
- VRFY((ret >= 0),"H5D collecive read succeed");
-
- ret = H5Pclose(dxfer_plist);
- VRFY((ret >= 0),"");
-
- /* Independent read */
- ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0),"H5D independent read succeed");
-
- ret = 0;
- for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){
- if(matrix_out[i]!=matrix_out1[i])ret = -1;
- if(ret < 0) break;
- }
- VRFY((ret >= 0),"H5D contiguous irregular collective read succeed");
-
- /*
- * Free read buffers.
- */
- HDfree(matrix_out);
- HDfree(matrix_out1);
-
- /*
- * Close memory file and memory dataspaces.
- */
- ret = H5Sclose(mspaceid);
- VRFY((ret >= 0),"");
- ret = H5Sclose(fspaceid1);
- VRFY((ret >= 0),"");
-
- /*
- * Close dataset.
- */
- ret = H5Dclose(dataseti);
- VRFY((ret >= 0),"");
-
- /*
- * Close property list
- */
- ret = H5Pclose(facc_plist);
- VRFY((ret >= 0),"");
-
-
- /*
- * Close the file.
- */
- ret = H5Fclose(file);
- VRFY((ret >= 0),"");
-
- return;
-}
+ const char *filename;
+ hid_t facc_plist, dxfer_plist;
+ hid_t file, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
+
+ /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ herr_t ret;
+
+ int i;
+
+ int *matrix_out;
+ int *matrix_out1; /* Buffer to read from the dataset */
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = GetTestParameters();
+
+ /* Initialize the buffer */
+
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*** For testing collective hyperslab selection read ***/
+
+ /* Obtain file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Open the file.
+ */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+     * stride(1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+     * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE0;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+ * Select two hyperslabs in memory. Hyperslabs has the same
+ * size and shape as the selected hyperslabs for the file dataspace.
+ * Only the starting point is different.
+ * The first selection
+ * block (1,1)
+     * stride(1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+     * Select two hyperslabs in memory. Hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace
+ * Only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Collective read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out);
+ VRFY((ret >= 0), "H5D collecive read succeed");
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+
+ /* Independent read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+ VRFY((ret >= 0), "H5D contiguous irregular collective read succeed");
+
+ /*
+ * Free read buffers.
+ */
+ HDfree(matrix_out);
+ HDfree(matrix_out1);
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid1);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ return;
+}
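
Both read tests build an irregular selection as a union of two hyperslabs: the first replaces whatever was selected before (H5S_SELECT_SET) and the second is OR-ed in. A reduced 2-D sketch of that pattern follows; the offsets and counts are illustrative only, not the RFFH/RFSH constants used above, and select_two_blocks is a placeholder name.

    #include "hdf5.h"

    /* Union two rectangular hyperslabs into one irregular selection.
     * The dataspace is assumed to be at least 5 x 12. */
    static herr_t
    select_two_blocks(hid_t space)
    {
        hsize_t start[2]  = {1, 2};
        hsize_t stride[2] = {1, 1};
        hsize_t count[2]  = {3, 4};
        hsize_t block[2]  = {1, 1};
        herr_t  status;

        /* first hyperslab replaces any existing selection */
        status = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, stride, count, block);
        if (status < 0)
            return status;

        /* second hyperslab is unioned with the first */
        start[0] = 2;
        start[1] = 4;
        count[1] = 8;
        return H5Sselect_hyperslab(space, H5S_SELECT_OR, start, stride, count, block);
    }

The memory-side selections in the test follow the same shapes with only the start offsets changed, which is what lets H5Dread() map the file selection onto the memory buffer element for element.
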
/****************************************************************
**
@@ -928,7 +902,7 @@ coll_read_test(void)
**
****************************************************************/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
#endif
@@ -936,64 +910,54 @@ coll_read_test(void)
#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
static void
-lower_dim_size_comp_test__select_checker_board(
- const int mpi_rank,
- const hid_t tgt_sid,
- const int tgt_rank,
- const hsize_t dims[LDSCT_DS_RANK],
- const int checker_edge_size,
- const int sel_rank,
- hsize_t sel_start[])
+lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size,
+ const int sel_rank, hsize_t sel_start[])
{
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__select_checker_board():";
+ const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
#endif
- hbool_t first_selection = TRUE;
- int i, j, k, l, m;
- int ds_offset;
- int sel_offset;
- const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
- /* this changes */
- hsize_t base_count;
- hsize_t offset_count;
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- herr_t ret; /* Generic return value */
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n",
- fcnName, mpi_rank, (int)dims[0], (int)dims[1], (int)dims[2],
- (int)dims[3], (int)dims[4], checker_edge_size);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
- HDassert( 0 < checker_edge_size );
- HDassert( 0 < sel_rank );
- HDassert( sel_rank <= tgt_rank );
- HDassert( tgt_rank <= test_max_rank );
- HDassert( test_max_rank <= LDSCT_DS_RANK );
+ HDassert(0 < checker_edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
sel_offset = test_max_rank - sel_rank;
- HDassert( sel_offset >= 0 );
+ HDassert(sel_offset >= 0);
ds_offset = test_max_rank - tgt_rank;
- HDassert( ds_offset >= 0 );
- HDassert( ds_offset <= sel_offset );
+ HDassert(ds_offset >= 0);
+ HDassert(ds_offset <= sel_offset);
- HDassert( (hsize_t)checker_edge_size <= dims[sel_offset] );
- HDassert( dims[sel_offset] == 10 );
+ HDassert((hsize_t)checker_edge_size <= dims[sel_offset]);
+ HDassert(dims[sel_offset] == 10);
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
- fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
- fcnName, mpi_rank, tgt_rank, ds_offset);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
@@ -1012,25 +976,23 @@ lower_dim_size_comp_test__select_checker_board(
base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
- if ( (dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0 ) {
+ if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) {
base_count++;
}
offset_count =
- (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
- ((hsize_t)(checker_edge_size * 2)));
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2)));
- if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) %
- ((hsize_t)(checker_edge_size * 2))) > 0 ) {
+ if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) {
offset_count++;
}
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
- fcnName, mpi_rank, base_count, offset_count);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count,
+ offset_count);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
@@ -1039,235 +1001,193 @@ lower_dim_size_comp_test__select_checker_board(
* the checker board.
*/
i = 0;
- while ( i < ds_offset ) {
+ while (i < ds_offset) {
/* these values should never be used */
- start[i] = 0;
+ start[i] = 0;
stride[i] = 0;
- count[i] = 0;
- block[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
i++;
}
- while ( i < sel_offset ) {
+ while (i < sel_offset) {
- start[i] = sel_start[i];
+ start[i] = sel_start[i];
stride[i] = 2 * dims[i];
- count[i] = 1;
- block[i] = 1;
+ count[i] = 1;
+ block[i] = 1;
i++;
}
- while ( i < test_max_rank ) {
+ while (i < test_max_rank) {
stride[i] = (hsize_t)(2 * checker_edge_size);
- block[i] = (hsize_t)checker_edge_size;
+ block[i] = (hsize_t)checker_edge_size;
i++;
}
i = 0;
do {
- if ( 0 >= sel_offset ) {
+ if (0 >= sel_offset) {
- if ( i == 0 ) {
+ if (i == 0) {
start[0] = 0;
count[0] = base_count;
-
- } else {
+ }
+ else {
start[0] = (hsize_t)checker_edge_size;
count[0] = offset_count;
-
}
}
j = 0;
do {
- if ( 1 >= sel_offset ) {
+ if (1 >= sel_offset) {
- if ( j == 0 ) {
+ if (j == 0) {
start[1] = 0;
count[1] = base_count;
-
- } else {
+ }
+ else {
start[1] = (hsize_t)checker_edge_size;
count[1] = offset_count;
-
}
}
k = 0;
do {
- if ( 2 >= sel_offset ) {
+ if (2 >= sel_offset) {
- if ( k == 0 ) {
+ if (k == 0) {
start[2] = 0;
count[2] = base_count;
-
- } else {
+ }
+ else {
start[2] = (hsize_t)checker_edge_size;
count[2] = offset_count;
-
}
}
l = 0;
do {
- if ( 3 >= sel_offset ) {
+ if (3 >= sel_offset) {
- if ( l == 0 ) {
+ if (l == 0) {
start[3] = 0;
count[3] = base_count;
-
- } else {
+ }
+ else {
start[3] = (hsize_t)checker_edge_size;
count[3] = offset_count;
-
}
}
m = 0;
do {
- if ( 4 >= sel_offset ) {
+ if (4 >= sel_offset) {
- if ( m == 0 ) {
+ if (m == 0) {
start[4] = 0;
count[4] = base_count;
-
- } else {
+ }
+ else {
start[4] = (hsize_t)checker_edge_size;
count[4] = offset_count;
-
}
}
- if ( ((i + j + k + l + m) % 2) == 0 ) {
+ if (((i + j + k + l + m) % 2) == 0) {
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank ==
- LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
-
- HDfprintf(stdout,
- "%s%d: *** first_selection = %d ***\n",
- fcnName, mpi_rank, (int)first_selection);
- HDfprintf(stdout,
- "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
- fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)start[0], (int)start[1],
- (int)start[2], (int)start[3],
- (int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3],
- (int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)count[0], (int)count[1],
- (int)count[2], (int)count[3],
- (int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)block[0], (int)block[1],
- (int)block[2], (int)block[3],
- (int)block[4]);
- HDfprintf(stdout,
- "%s:%d: n-cube extent dims = %d.\n",
- fcnName, mpi_rank,
- H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout,
- "%s:%d: selection rank = %d.\n",
- fcnName, mpi_rank, sel_rank);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i,
+ j, k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank,
+ sel_rank);
}
#endif
- if ( first_selection ) {
+ if (first_selection) {
first_selection = FALSE;
- ret = H5Sselect_hyperslab
- (
- tgt_sid,
- H5S_SELECT_SET,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
- &(block[ds_offset])
- );
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
- } else {
-
- ret = H5Sselect_hyperslab
- (
- tgt_sid,
- H5S_SELECT_OR,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
- &(block[ds_offset])
- );
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
-
}
}
m++;
- } while ( ( m <= 1 ) &&
- ( 4 >= sel_offset ) );
+ } while ((m <= 1) && (4 >= sel_offset));
l++;
- } while ( ( l <= 1 ) &&
- ( 3 >= sel_offset ) );
+ } while ((l <= 1) && (3 >= sel_offset));
k++;
- } while ( ( k <= 1 ) &&
- ( 2 >= sel_offset ) );
+ } while ((k <= 1) && (2 >= sel_offset));
j++;
- } while ( ( j <= 1 ) &&
- ( 1 >= sel_offset ) );
-
+ } while ((j <= 1) && (1 >= sel_offset));
i++;
- } while ( ( i <= 1 ) &&
- ( 0 >= sel_offset ) );
+ } while ((i <= 1) && (0 >= sel_offset));
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
- fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* Clip the selection back to the dataspace proper. */
- for ( i = 0; i < test_max_rank; i++ ) {
+ for (i = 0; i < test_max_rank; i++) {
start[i] = 0;
stride[i] = dims[i];
@@ -1275,15 +1195,14 @@ lower_dim_size_comp_test__select_checker_board(
block[i] = dims[i];
}
- ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND,
- start, stride, count, block);
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
- fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
@@ -1292,7 +1211,6 @@ lower_dim_size_comp_test__select_checker_board(
} /* lower_dim_size_comp_test__select_checker_board() */
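
One detail of the checker-board routine above that is easy to miss: after all the OR-ed pieces are in place, the selection is intersected (H5S_SELECT_AND) with a single hyperslab covering the whole extent, so any block that ran past the edge is clipped off. A rank-2 sketch of that final step follows; clip_to_extent and dims are placeholders, not part of the test code.

    #include "hdf5.h"

    /* Intersect the current selection of a rank-2 dataspace with its full
     * extent, discarding anything selected outside dims[0] x dims[1]. */
    static herr_t
    clip_to_extent(hid_t space, const hsize_t dims[2])
    {
        hsize_t start[2]  = {0, 0};
        hsize_t stride[2] = {dims[0], dims[1]}; /* unused for count == 1, but must be non-zero */
        hsize_t count[2]  = {1, 1};
        hsize_t block[2]  = {dims[0], dims[1]};

        return H5Sselect_hyperslab(space, H5S_SELECT_AND, start, stride, count, block);
    }
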
-
/****************************************************************
**
** lower_dim_size_comp_test__verify_data():
@@ -1352,138 +1270,123 @@ lower_dim_size_comp_test__select_checker_board(
#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
static hbool_t
-lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
+lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const int mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- const int rank,
- const int edge_size,
- const int checker_edge_size,
- uint32_t first_expected_val,
- hbool_t buf_starts_in_checker)
+ const int rank, const int edge_size, const int checker_edge_size,
+ uint32_t first_expected_val, hbool_t buf_starts_in_checker)
{
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__verify_data():";
+ const char *fcnName = "lower_dim_size_comp_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
- uint32_t expected_value;
- uint32_t * val_ptr;
- int i, j, k, l, m; /* to track position in n-cube */
- int v, w, x, y, z; /* to track position in checker */
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
const int test_max_rank = 5; /* code changes needed if this is increased */
- HDassert( buf_ptr != NULL );
- HDassert( 0 < rank );
- HDassert( rank <= test_max_rank );
- HDassert( edge_size >= 6 );
- HDassert( 0 < checker_edge_size );
- HDassert( checker_edge_size <= edge_size );
- HDassert( test_max_rank <= LDSCT_DS_RANK );
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
- HDfprintf(stdout, "%s checker_edge_size = %d.\n",
- fcnName, checker_edge_size);
- HDfprintf(stdout, "%s first_expected_val = %d.\n",
- fcnName, (int)first_expected_val);
- HDfprintf(stdout, "%s starts_in_checker = %d.\n",
- fcnName, (int)buf_starts_in_checker);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
}
#endif
- val_ptr = buf_ptr;
+ val_ptr = buf_ptr;
expected_value = first_expected_val;
- i = 0;
- v = 0;
+ i = 0;
+ v = 0;
start_in_checker[0] = buf_starts_in_checker;
- do
- {
- if ( v >= checker_edge_size ) {
+ do {
+ if (v >= checker_edge_size) {
- start_in_checker[0] = ! start_in_checker[0];
- v = 0;
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
}
- j = 0;
- w = 0;
+ j = 0;
+ w = 0;
start_in_checker[1] = start_in_checker[0];
- do
- {
- if ( w >= checker_edge_size ) {
+ do {
+ if (w >= checker_edge_size) {
- start_in_checker[1] = ! start_in_checker[1];
- w = 0;
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
}
- k = 0;
- x = 0;
+ k = 0;
+ x = 0;
start_in_checker[2] = start_in_checker[1];
- do
- {
- if ( x >= checker_edge_size ) {
+ do {
+ if (x >= checker_edge_size) {
- start_in_checker[2] = ! start_in_checker[2];
- x = 0;
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
}
- l = 0;
- y = 0;
+ l = 0;
+ y = 0;
start_in_checker[3] = start_in_checker[2];
- do
- {
- if ( y >= checker_edge_size ) {
+ do {
+ if (y >= checker_edge_size) {
- start_in_checker[3] = ! start_in_checker[3];
- y = 0;
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
}
m = 0;
z = 0;
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
- LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
}
#endif
in_checker = start_in_checker[3];
- do
- {
+ do {
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
- LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, " %d", (int)(*val_ptr));
}
#endif
- if ( z >= checker_edge_size ) {
+ if (z >= checker_edge_size) {
- in_checker = ! in_checker;
- z = 0;
+ in_checker = !in_checker;
+ z = 0;
}
- if ( in_checker ) {
+ if (in_checker) {
- if ( *val_ptr != expected_value ) {
+ if (*val_ptr != expected_value) {
good_data = FALSE;
}
/* zero out buffer for re-use */
*val_ptr = 0;
-
- } else if ( *val_ptr != 0 ) {
+ }
+ else if (*val_ptr != 0) {
good_data = FALSE;
/* zero out buffer for re-use */
*val_ptr = 0;
-
}
val_ptr++;
@@ -1491,36 +1394,29 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
m++;
z++;
- } while ( ( rank >= (test_max_rank - 4) ) &&
- ( m < edge_size ) );
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
- LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, "\n");
}
#endif
l++;
y++;
- } while ( ( rank >= (test_max_rank - 3) ) &&
- ( l < edge_size ) );
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
k++;
x++;
- } while ( ( rank >= (test_max_rank - 2) ) &&
- ( k < edge_size ) );
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
j++;
w++;
- } while ( ( rank >= (test_max_rank - 1) ) &&
- ( j < edge_size ) );
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
i++;
v++;
- } while ( ( rank >= test_max_rank ) &&
- ( i < edge_size ) );
+ } while ((rank >= test_max_rank) && (i < edge_size));
- return(good_data);
+ return (good_data);
} /* lower_dim_size_comp_test__verify_data() */
-
/*-------------------------------------------------------------------------
* Function: lower_dim_size_comp_test__run_test()
*
@@ -1535,103 +1431,97 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
*-------------------------------------------------------------------------
*/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
static void
-lower_dim_size_comp_test__run_test(const int chunk_edge_size,
- const hbool_t use_collective_io,
+lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
const hid_t dset_type)
{
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- const char *fcnName = "lower_dim_size_comp_test__run_test()";
- int rank;
- hsize_t dims[32];
- hsize_t max_dims[32];
+ const char *fcnName = "lower_dim_size_comp_test__run_test()";
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- const char *filename;
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
- int i;
- int start_index;
- int stop_index;
- int mrc;
- int mpi_rank;
- int mpi_size;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist = H5P_DEFAULT;
- size_t small_ds_size;
- size_t small_ds_slice_size;
- size_t large_ds_size;
+ const char *filename;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i;
+ int start_index;
+ int stop_index;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- size_t large_ds_slice_size;
+ size_t large_ds_slice_size;
#endif
- uint32_t expected_value;
- uint32_t * small_ds_buf_0 = NULL;
- uint32_t * small_ds_buf_1 = NULL;
- uint32_t * large_ds_buf_0 = NULL;
- uint32_t * large_ds_buf_1 = NULL;
- uint32_t * ptr_0;
- uint32_t * ptr_1;
- hsize_t small_chunk_dims[LDSCT_DS_RANK];
- hsize_t large_chunk_dims[LDSCT_DS_RANK];
- hsize_t small_dims[LDSCT_DS_RANK];
- hsize_t large_dims[LDSCT_DS_RANK];
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- hsize_t small_sel_start[LDSCT_DS_RANK];
- hsize_t large_sel_start[LDSCT_DS_RANK];
- hid_t full_mem_small_ds_sid;
- hid_t full_file_small_ds_sid;
- hid_t mem_small_ds_sid;
- hid_t file_small_ds_sid;
- hid_t full_mem_large_ds_sid;
- hid_t full_file_large_ds_sid;
- hid_t mem_large_ds_sid;
- hid_t file_large_ds_sid;
- hid_t small_ds_dcpl_id = H5P_DEFAULT;
- hid_t large_ds_dcpl_id = H5P_DEFAULT;
- hid_t small_dataset; /* Dataset ID */
- hid_t large_dataset; /* Dataset ID */
- htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ uint32_t expected_value;
+ uint32_t *small_ds_buf_0 = NULL;
+ uint32_t *small_ds_buf_1 = NULL;
+ uint32_t *large_ds_buf_0 = NULL;
+ uint32_t *large_ds_buf_1 = NULL;
+ uint32_t *ptr_0;
+ uint32_t *ptr_1;
+ hsize_t small_chunk_dims[LDSCT_DS_RANK];
+ hsize_t large_chunk_dims[LDSCT_DS_RANK];
+ hsize_t small_dims[LDSCT_DS_RANK];
+ hsize_t large_dims[LDSCT_DS_RANK];
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ hsize_t small_sel_start[LDSCT_DS_RANK];
+ hsize_t large_sel_start[LDSCT_DS_RANK];
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- HDassert( mpi_size >= 1 );
+ HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
mpi_info = MPI_INFO_NULL;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n",
- fcnName, mpi_rank, (int)chunk_edge_size);
- HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n",
- fcnName, mpi_rank, (int)use_collective_io);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size);
+ HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
- small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
- small_ds_slice_size = (size_t) ( 1 * 1 * 10 * 10);
+ small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
+ small_ds_slice_size = (size_t)(1 * 1 * 10 * 10);
large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- large_ds_slice_size = (size_t) (10 * 10 * 10 * 10);
+ large_ds_slice_size = (size_t)(10 * 10 * 10 * 10);
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
(int)small_ds_size, (int)small_ds_slice_size);
- HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
(int)large_ds_size, (int)large_ds_slice_size);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -1649,13 +1539,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
-
/* initialize the buffers */
ptr_0 = small_ds_buf_0;
ptr_1 = small_ds_buf_1;
- for ( i = 0; i < (int)small_ds_size; i++ ) {
+ for (i = 0; i < (int)small_ds_size; i++) {
*ptr_0 = (uint32_t)i;
*ptr_1 = 0;
@@ -1667,7 +1556,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_0 = large_ds_buf_0;
ptr_1 = large_ds_buf_1;
- for ( i = 0; i < (int)large_ds_size; i++ ) {
+ for (i = 0; i < (int)large_ds_size; i++) {
*ptr_0 = (uint32_t)i;
*ptr_1 = 0;
@@ -1676,12 +1565,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_1++;
}
-
/* get the file name */
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
-
+ HDassert(filename != NULL);
/* ----------------------------------------
* CREATE AN HDF5 FILE WITH PARALLEL ACCESS
@@ -1700,11 +1587,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
-
/* setup dims: */
small_dims[0] = (hsize_t)(mpi_size + 1);
- small_dims[1] = 1;
- small_dims[2] = 1;
+ small_dims[1] = 1;
+ small_dims[2] = 1;
small_dims[3] = 10;
small_dims[4] = 10;
@@ -1715,51 +1601,39 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_dims[4] = 10;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)small_dims[0], (int)small_dims[1],
- (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
- HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)large_dims[0], (int)large_dims[1],
- (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0],
+ (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
+ HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0],
+ (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
}
#endif
/* create dataspaces */
full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_mem_small_ds_sid != 0),
- "H5Screate_simple() full_mem_small_ds_sid succeeded");
+ VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_file_small_ds_sid != 0),
- "H5Screate_simple() full_file_small_ds_sid succeeded");
+ VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((mem_small_ds_sid != 0),
- "H5Screate_simple() mem_small_ds_sid succeeded");
+ VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((file_small_ds_sid != 0),
- "H5Screate_simple() file_small_ds_sid succeeded");
-
+ VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded");
full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_mem_large_ds_sid != 0),
- "H5Screate_simple() full_mem_large_ds_sid succeeded");
+ VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_file_large_ds_sid != 0),
- "H5Screate_simple() full_file_large_ds_sid succeeded");
+ VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded");
mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((mem_large_ds_sid != 0),
- "H5Screate_simple() mem_large_ds_sid succeeded");
+ VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded");
file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((file_large_ds_sid != 0),
- "H5Screate_simple() file_large_ds_sid succeeded");
-
+ VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded");
/* Select the entire extent of the full small ds dataspaces */
ret = H5Sselect_all(full_mem_small_ds_sid);
@@ -1768,7 +1642,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Sselect_all(full_file_small_ds_sid);
VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
-
/* Select the entire extent of the full large ds dataspaces */
ret = H5Sselect_all(full_mem_large_ds_sid);
VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
@@ -1776,22 +1649,20 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Sselect_all(full_file_large_ds_sid);
VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
-
/* if chunk edge size is greater than zero, set up the small and
* large data set creation property lists to specify chunked
* datasets.
*/
- if ( chunk_edge_size > 0 ) {
+ if (chunk_edge_size > 0) {
small_chunk_dims[0] = (hsize_t)(1);
small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)small_chunk_dims[0],
- (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2],
(int)small_chunk_dims[3], (int)small_chunk_dims[4]);
}
#endif
@@ -1806,15 +1677,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
large_chunk_dims[0] = (hsize_t)(1);
- large_chunk_dims[1] = large_chunk_dims[2] =
- large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
-
+ large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] =
+ (hsize_t)chunk_edge_size;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)large_chunk_dims[0],
- (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2],
(int)large_chunk_dims[3], (int)large_chunk_dims[4]);
}
#endif
@@ -1829,30 +1698,23 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
}
-
/* create the small dataset */
- small_dataset = H5Dcreate2(fid, "small_dataset", dset_type,
- file_small_ds_sid, H5P_DEFAULT,
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT,
small_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded");
-
/* create the large dataset */
- large_dataset = H5Dcreate2(fid, "large_dataset", dset_type,
- file_large_ds_sid, H5P_DEFAULT,
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT,
large_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: small/large ds id = %d / %d.\n",
- fcnName, mpi_rank, (int)small_dataset,
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset,
(int)large_dataset);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
-
/* setup xfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
@@ -1860,14 +1722,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if ( ! use_collective_io ) {
+ if (!use_collective_io) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,
- H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0), "H5Pset_dxpl_mpio_collective_opt() suceeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() suceeded");
}
-
/* setup selection to write initial data to the small data sets */
start[0] = (hsize_t)(mpi_rank + 1);
start[1] = start[2] = start[3] = start[4] = 0;
@@ -1882,135 +1742,89 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[3] = block[4] = 10;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for small data set initialization.\n",
- fcnName, mpi_rank);
- HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
/* setup selections for writing initial data to the small data set */
- ret = H5Sselect_hyperslab(mem_small_ds_sid,
- H5S_SELECT_SET,
- start,
- stride,
- count,
- block);
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
- ret = H5Sselect_hyperslab(file_small_ds_sid,
- H5S_SELECT_SET,
- start,
- stride,
- count,
- block);
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) suceeded");
- if ( MAINPROCESS ) { /* add an additional slice to the selections */
+ if (MAINPROCESS) { /* add an additional slice to the selections */
start[0] = 0;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
- fcnName, mpi_rank);
- HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Sselect_hyperslab(mem_small_ds_sid,
- H5S_SELECT_OR,
- start,
- stride,
- count,
- block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
-
- ret = H5Sselect_hyperslab(file_small_ds_sid,
- H5S_SELECT_OR,
- start,
- stride,
- count,
- block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) suceeded");
- }
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
- check = H5Sselect_valid(mem_small_ds_sid);
- VRFY((check == TRUE),"H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) suceeded");
+ }
- check = H5Sselect_valid(file_small_ds_sid);
- VRFY((check == TRUE),"H5Sselect_valid(file_small_ds_sid) returns TRUE");
+ check = H5Sselect_valid(mem_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+ check = H5Sselect_valid(file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE");
/* write the initial value of the small data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n",
- fcnName, mpi_rank);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(small_dataset,
- dset_type,
- mem_small_ds_sid,
- file_small_ds_sid,
- xfer_plist,
- small_ds_buf_0);
+ ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
-
/* read the small data set back to verify that it contains the
* expected data. Note that each process reads in the entire
* data set and verifies it.
*/
- ret = H5Dread(small_dataset,
- H5T_NATIVE_UINT32,
- full_mem_small_ds_sid,
- full_file_small_ds_sid,
- xfer_plist,
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist,
small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
-
/* sync with the other processes before checking data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
-
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
/* verify that the correct data was written to the small data set,
* and reset the buffer to zero in passing.
*/
expected_value = 0;
- mis_match = FALSE;
- ptr_1 = small_ds_buf_1;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
i = 0;
- for ( i = 0; i < (int)small_ds_size; i++ ) {
+ for (i = 0; i < (int)small_ds_size; i++) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
@@ -2020,9 +1834,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_1++;
expected_value++;
}
- VRFY( (mis_match == FALSE), "small ds init data good.");
-
-
+ VRFY((mis_match == FALSE), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
start[0] = (hsize_t)(mpi_rank + 1);
@@ -2037,104 +1849,64 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for large data set initialization.\n",
- fcnName, mpi_rank);
- HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Sselect_hyperslab(mem_large_ds_sid,
- H5S_SELECT_SET,
- start,
- stride,
- count,
- block);
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded");
- ret = H5Sselect_hyperslab(file_large_ds_sid,
- H5S_SELECT_SET,
- start,
- stride,
- count,
- block);
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) suceeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
- (int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
- "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
- (int)H5Sget_select_npoints(file_large_ds_sid));
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- if ( MAINPROCESS ) { /* add an additional slice to the selections */
+ if (MAINPROCESS) { /* add an additional slice to the selections */
start[0] = (hsize_t)0;
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
- fcnName, mpi_rank);
- HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
- (int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
- (int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
- (int)block[2], (int)block[3], (int)block[4]);
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Sselect_hyperslab(mem_large_ds_sid,
- H5S_SELECT_OR,
- start,
- stride,
- count,
- block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
-
- ret = H5Sselect_hyperslab(file_large_ds_sid,
- H5S_SELECT_OR,
- start,
- stride,
- count,
- block);
- VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) suceeded");
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) suceeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
- (int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
- "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
- (int)H5Sget_select_npoints(file_large_ds_sid));
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
}
@@ -2150,93 +1922,70 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[0] = (hsize_t)(mpi_size + 1);
block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
- ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND,
- start, stride, count, block);
- VRFY((ret != FAIL),"H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
- ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND,
- start, stride, count, block);
- VRFY((ret != FAIL),"H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
- "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
- (int)dims[2], (int)dims[3], (int)dims[4]);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
- "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
- (int)dims[2], (int)dims[3], (int)dims[4]);
+ HDfprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- check = H5Sselect_valid(mem_large_ds_sid);
- VRFY((check == TRUE),"H5Sselect_valid(mem_large_ds_sid) returns TRUE");
-
- check = H5Sselect_valid(file_large_ds_sid);
- VRFY((check == TRUE),"H5Sselect_valid(file_large_ds_sid) returns TRUE");
+ check = H5Sselect_valid(mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+ check = H5Sselect_valid(file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE");
/* write the initial value of the large data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n",
- fcnName, mpi_rank);
- HDfprintf(stdout,
- "%s:%d: large_dataset = %d.\n",
- fcnName, mpi_rank,
- (int)large_dataset);
- HDfprintf(stdout,
- "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n",
- fcnName, mpi_rank,
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank,
(int)mem_large_ds_sid, (int)file_large_ds_sid);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(large_dataset,
- dset_type,
- mem_large_ds_sid,
- file_large_ds_sid,
- xfer_plist,
- large_ds_buf_0);
+ ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0);
- if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
-
/* sync with the other processes before checking data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
/* read the large data set back to verify that it contains the
* expected data. Note that each process reads in the entire
* data set.
*/
- ret = H5Dread(large_dataset,
- H5T_NATIVE_UINT32,
- full_mem_large_ds_sid,
- full_file_large_ds_sid,
- xfer_plist,
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist,
large_ds_buf_1);
VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
-
/* verify that the correct data was written to the large data set.
* in passing, reset the buffer to zeros
*/
expected_value = 0;
- mis_match = FALSE;
- ptr_1 = large_ds_buf_1;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
i = 0;
- for ( i = 0; i < (int)large_ds_size; i++ ) {
+ for (i = 0; i < (int)large_ds_size; i++) {
- if ( *ptr_1 != expected_value ) {
+ if (*ptr_1 != expected_value) {
mis_match = TRUE;
}
@@ -2246,52 +1995,39 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_1++;
expected_value++;
}
- VRFY( (mis_match == FALSE), "large ds init data good.");
+ VRFY((mis_match == FALSE), "large ds init data good.");
/***********************************/
/***** INITIALIZATION COMPLETE *****/
/***********************************/
-
/* read a checkerboard selection of the process slice of the
* small on disk data set into the process slice of the large
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
- small_sel_start[3] = small_sel_start[4] = 0;
-
- lower_dim_size_comp_test__select_checker_board(mpi_rank,
- file_small_ds_sid,
- /* tgt_rank = */ 5,
- small_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank */ 2,
- small_sel_start);
-
- expected_value = (uint32_t)
- ((small_sel_start[0] * small_dims[1] * small_dims[2] *
- small_dims[3] * small_dims[4]) +
- (small_sel_start[1] * small_dims[2] * small_dims[3] *
- small_dims[4]) +
- (small_sel_start[2] * small_dims[3] * small_dims[4]) +
- (small_sel_start[3] * small_dims[4]) +
- (small_sel_start[4]));
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
+
+ expected_value =
+ (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[3] * small_dims[4]) + (small_sel_start[4]));
large_sel_start[0] = (hsize_t)(mpi_rank + 1);
large_sel_start[1] = 5;
large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
- lower_dim_size_comp_test__select_checker_board(mpi_rank,
- mem_large_ds_sid,
- /* tgt_rank = */ 5,
- large_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank = */ 2,
- large_sel_start);
-
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -2299,18 +2035,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
-
- ret = H5Dread(small_dataset,
- H5T_NATIVE_UINT32,
- mem_large_ds_sid,
- file_small_ds_sid,
- xfer_plist,
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
large_ds_buf_1);
VRFY((ret >= 0), "H5Sread() slice from small ds succeeded.");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2319,28 +2050,25 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
- start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
- large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
- large_dims[4]) +
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[3] * large_dims[4]) +
- (large_sel_start[4]));
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
- stop_index = start_index + (int)small_ds_slice_size;
+ stop_index = start_index + (int)small_ds_slice_size;
- HDassert( 0 <= start_index );
- HDassert( start_index < stop_index );
- HDassert( stop_index <= (int)large_ds_size );
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)large_ds_size);
ptr_1 = large_ds_buf_1;
- for ( i = 0; i < start_index; i++ ) {
+ for (i = 0; i < start_index; i++) {
- if ( *ptr_1 != (uint32_t)0 ) {
+ if (*ptr_1 != (uint32_t)0) {
data_ok = FALSE;
- *ptr_1 = (uint32_t)0;
+ *ptr_1 = (uint32_t)0;
}
ptr_1++;
@@ -2348,16 +2076,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
-
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- mpi_rank,
+ mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- /* rank */ 2,
- /* edge_size */ 10,
- /* checker_edge_size */ 3,
- expected_value,
- /* buf_starts_in_checker */ TRUE);
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3, expected_value,
+ /* buf_starts_in_checker */ TRUE);
VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
@@ -2365,13 +2091,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_1 += small_ds_slice_size;
+ for (i = stop_index; i < (int)large_ds_size; i++) {
- for ( i = stop_index; i < (int)large_ds_size; i++ ) {
-
- if ( *ptr_1 != (uint32_t)0 ) {
+ if (*ptr_1 != (uint32_t)0) {
data_ok = FALSE;
- *ptr_1 = (uint32_t)0;
+ *ptr_1 = (uint32_t)0;
}
ptr_1++;
@@ -2379,39 +2104,27 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
-
-
-
-
/* read a checkerboard selection of a slice of the process slice of
* the large on disk data set into the process slice of the small
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
- small_sel_start[3] = small_sel_start[4] = 0;
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
- lower_dim_size_comp_test__select_checker_board(mpi_rank,
- mem_small_ds_sid,
- /* tgt_rank = */ 5,
- small_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank */ 2,
- small_sel_start);
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
large_sel_start[0] = (hsize_t)(mpi_rank + 1);
large_sel_start[1] = 5;
large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
- lower_dim_size_comp_test__select_checker_board(mpi_rank,
- file_large_ds_sid,
- /* tgt_rank = */ 5,
- large_dims,
- /* checker_edge_size = */ 3,
- /* sel_rank = */ 2,
- large_sel_start);
-
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
/* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
@@ -2419,18 +2132,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
-
- ret = H5Dread(large_dataset,
- H5T_NATIVE_UINT32,
- mem_small_ds_sid,
- file_large_ds_sid,
- xfer_plist,
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
small_ds_buf_1);
VRFY((ret >= 0), "H5Sread() slice from large ds succeeded.");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2439,31 +2147,28 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
- expected_value = (uint32_t)
- ((large_sel_start[0] * large_dims[1] * large_dims[2] *
- large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
- large_dims[4]) +
- (large_sel_start[2] * large_dims[3] * large_dims[4]) +
- (large_sel_start[3] * large_dims[4]) +
- (large_sel_start[4]));
+ expected_value =
+ (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size;
- stop_index = start_index + (int)small_ds_slice_size;
+ stop_index = start_index + (int)small_ds_slice_size;
- HDassert( 0 <= start_index );
- HDassert( start_index < stop_index );
- HDassert( stop_index <= (int)small_ds_size );
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)small_ds_size);
ptr_1 = small_ds_buf_1;
- for ( i = 0; i < start_index; i++ ) {
+ for (i = 0; i < start_index; i++) {
- if ( *ptr_1 != (uint32_t)0 ) {
+ if (*ptr_1 != (uint32_t)0) {
data_ok = FALSE;
- *ptr_1 = (uint32_t)0;
+ *ptr_1 = (uint32_t)0;
}
ptr_1++;
@@ -2471,15 +2176,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
-
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
/* rank */ 2,
/* edge_size */ 10,
- /* checker_edge_size */ 3,
- expected_value,
+ /* checker_edge_size */ 3, expected_value,
/* buf_starts_in_checker */ TRUE);
VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
@@ -2488,20 +2191,19 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ptr_1 += small_ds_slice_size;
+ for (i = stop_index; i < (int)small_ds_size; i++) {
- for ( i = stop_index; i < (int)small_ds_size; i++ ) {
-
- if ( *ptr_1 != (uint32_t)0 ) {
+ if (*ptr_1 != (uint32_t)0) {
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
- fcnName, mpi_rank, (int)i, (int)(*ptr_1));
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i,
+ (int)(*ptr_1));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
data_ok = FALSE;
- *ptr_1 = (uint32_t)0;
+ *ptr_1 = (uint32_t)0;
}
ptr_1++;
@@ -2509,7 +2211,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
-
/* Close dataspaces */
ret = H5Sclose(full_mem_small_ds_sid);
VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
@@ -2523,7 +2224,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Sclose(file_small_ds_sid);
VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
-
ret = H5Sclose(full_mem_large_ds_sid);
VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
@@ -2536,7 +2236,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Sclose(file_large_ds_sid);
VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
-
/* Close Datasets */
ret = H5Dclose(small_dataset);
VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
@@ -2544,24 +2243,26 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
ret = H5Dclose(large_dataset);
VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
-
/* close the file collectively */
MESG("about to close file.");
ret = H5Fclose(fid);
VRFY((ret != FAIL), "file close succeeded");
/* Free memory buffers */
- if ( small_ds_buf_0 != NULL ) HDfree(small_ds_buf_0);
- if ( small_ds_buf_1 != NULL ) HDfree(small_ds_buf_1);
+ if (small_ds_buf_0 != NULL)
+ HDfree(small_ds_buf_0);
+ if (small_ds_buf_1 != NULL)
+ HDfree(small_ds_buf_1);
- if ( large_ds_buf_0 != NULL ) HDfree(large_ds_buf_0);
- if ( large_ds_buf_1 != NULL ) HDfree(large_ds_buf_1);
+ if (large_ds_buf_0 != NULL)
+ HDfree(large_ds_buf_0);
+ if (large_ds_buf_1 != NULL)
+ HDfree(large_ds_buf_1);
return;
} /* lower_dim_size_comp_test__run_test() */
-
/*-------------------------------------------------------------------------
* Function: lower_dim_size_comp_test()
*
@@ -2580,26 +2281,21 @@ void
lower_dim_size_comp_test(void)
{
/* const char *fcnName = "lower_dim_size_comp_test()"; */
- int chunk_edge_size = 0;
- int use_collective_io;
+ int chunk_edge_size = 0;
+ int use_collective_io;
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
- for(use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
+ for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
chunk_edge_size = 0;
- lower_dim_size_comp_test__run_test(chunk_edge_size,
- (hbool_t)use_collective_io,
- H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
chunk_edge_size = 5;
- lower_dim_size_comp_test__run_test(chunk_edge_size,
- (hbool_t)use_collective_io,
- H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
} /* end for */
return;
} /* lower_dim_size_comp_test() */
-
/*-------------------------------------------------------------------------
* Function: link_chunk_collective_io_test()
*
@@ -2629,48 +2325,48 @@ lower_dim_size_comp_test(void)
*-------------------------------------------------------------------------
*/
-#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
void
link_chunk_collective_io_test(void)
{
/* const char *fcnName = "link_chunk_collective_io_test()"; */
const char *filename;
- hbool_t mis_match = FALSE;
- int i;
- int mrc;
- int mpi_rank;
- int mpi_size;
- MPI_Comm mpi_comm = MPI_COMM_WORLD;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hsize_t count[1] = {1};
- hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- hsize_t start[1];
- hsize_t dims[1];
- hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
- herr_t ret; /* Generic return value */
- hid_t file_id;
- hid_t acc_tpl;
- hid_t dset_id;
- hid_t file_ds_sid;
- hid_t write_mem_ds_sid;
- hid_t read_mem_ds_sid;
- hid_t ds_dcpl_id;
- hid_t xfer_plist;
- double diff;
- double expected_value;
- double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
- double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+ hbool_t mis_match = FALSE;
+ int i;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_WORLD;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hsize_t count[1] = {1};
+ hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t start[1];
+ hsize_t dims[1];
+ hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ herr_t ret; /* Generic return value */
+ hid_t file_id;
+ hid_t acc_tpl;
+ hid_t dset_id;
+ hid_t file_ds_sid;
+ hid_t write_mem_ds_sid;
+ hid_t read_mem_ds_sid;
+ hid_t ds_dcpl_id;
+ hid_t xfer_plist;
+ double diff;
+ double expected_value;
+ double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+ double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- HDassert( mpi_size > 0 );
+ HDassert(mpi_size > 0);
/* get the file name */
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
+ HDassert(filename != NULL);
/* setup file access template */
acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
@@ -2691,16 +2387,13 @@ link_chunk_collective_io_test(void)
/* setup mem and file dataspaces */
write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((write_mem_ds_sid != 0),
- "H5Screate_simple() write_mem_ds_sid succeeded");
+ VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded");
read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((read_mem_ds_sid != 0),
- "H5Screate_simple() read_mem_ds_sid succeeded");
+ VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded");
file_ds_sid = H5Screate_simple(1, dims, NULL);
- VRFY((file_ds_sid != 0),
- "H5Screate_simple() file_ds_sid succeeded");
+ VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded");
/* setup data set creation property list */
ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
@@ -2713,9 +2406,8 @@ link_chunk_collective_io_test(void)
VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
/* create the data set */
- dset_id = H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE,
- file_ds_sid, H5P_DEFAULT,
- ds_dcpl_id, H5P_DEFAULT);
+ dset_id =
+ H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded");
/* close the dataset creation property list */
@@ -2723,23 +2415,17 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded");
/* setup local data */
- expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) *
- (double)(mpi_rank);
- for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
+ expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank);
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
local_data_written[i] = expected_value;
- local_data_read[i] = 0.0;
- expected_value += 1.0;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
}
/* select the file and mem spaces */
start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
- ret = H5Sselect_hyperslab(file_ds_sid,
- H5S_SELECT_SET,
- start,
- stride,
- count,
- block);
+ ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) suceeded");
ret = H5Sselect_all(write_mem_ds_sid);
@@ -2755,26 +2441,16 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write the data set */
- ret = H5Dwrite(dset_id,
- H5T_NATIVE_DOUBLE,
- write_mem_ds_sid,
- file_ds_sid,
- xfer_plist,
- local_data_written);
+ ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written);
VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
/* sync with the other processes before checking data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after dataset write");
+ VRFY((mrc == MPI_SUCCESS), "Sync after dataset write");
/* read this processes slice of the dataset back in */
- ret = H5Dread(dset_id,
- H5T_NATIVE_DOUBLE,
- read_mem_ds_sid,
- file_ds_sid,
- xfer_plist,
- local_data_read);
+ ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read);
VRFY((ret >= 0), "H5Dread() dataset read succeeded");
/* close the xfer property list */
@@ -2783,17 +2459,17 @@ link_chunk_collective_io_test(void)
/* verify the data */
mis_match = FALSE;
- for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
diff = local_data_written[i] - local_data_read[i];
diff = fabs(diff);
- if ( diff >= 0.001 ) {
+ if (diff >= 0.001) {
mis_match = TRUE;
}
}
- VRFY( (mis_match == FALSE), "dataset data good.");
+ VRFY((mis_match == FALSE), "dataset data good.");
/* Close dataspaces */
ret = H5Sclose(write_mem_ds_sid);
@@ -2816,4 +2492,3 @@ link_chunk_collective_io_test(void)
return;
} /* link_chunk_collective_io_test() */
-
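The t_2Gio.c hunks above are pure reformatting of tests that all exercise one pattern: each MPI rank selects its own hyperslab of a shared dataset and then reads or writes it through a collective MPI-IO transfer property list. For orientation, a minimal stand-alone sketch of that pattern follows; it is not part of this commit, it requires a parallel (MPI-enabled) build of HDF5, and the file name, dataset name, and sizes are made up for illustration.

/*
 * Sketch only: each rank writes NROWS_PER_RANK rows of a shared 2-D dataset
 * collectively.  Error checking is omitted for brevity; the real tests wrap
 * every call in VRFY() (see testpar.h below).
 */
#include <stdlib.h>
#include "mpi.h"
#include "hdf5.h"

#define NROWS_PER_RANK 10
#define NCOLS          10

int
main(int argc, char **argv)
{
    int     mpi_rank, mpi_size, i;
    int     buf[NROWS_PER_RANK][NCOLS];
    hid_t   fapl, fid, file_sid, mem_sid, did, dxpl;
    hsize_t dims[2], start[2], count[2];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    /* route all file I/O through MPI-IO */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    fid = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* dataset sized so every rank owns NROWS_PER_RANK contiguous rows */
    dims[0]  = (hsize_t)mpi_size * NROWS_PER_RANK;
    dims[1]  = NCOLS;
    file_sid = H5Screate_simple(2, dims, NULL);
    did = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, file_sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* per-rank hyperslab in the file, full extent in memory */
    start[0] = (hsize_t)mpi_rank * NROWS_PER_RANK;
    start[1] = 0;
    count[0] = NROWS_PER_RANK;
    count[1] = NCOLS;
    H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL);
    mem_sid = H5Screate_simple(2, count, NULL);

    /* rank-specific data so a later read-back could be verified */
    for (i = 0; i < NROWS_PER_RANK * NCOLS; i++)
        buf[i / NCOLS][i % NCOLS] = mpi_rank * 1000 + i;

    /* collective transfer, as in the tests above */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(did, H5T_NATIVE_INT, mem_sid, file_sid, dxpl, buf);

    H5Pclose(dxpl);
    H5Sclose(mem_sid);
    H5Sclose(file_sid);
    H5Dclose(did);
    H5Fclose(fid);
    H5Pclose(fapl);
    MPI_Finalize();
    return EXIT_SUCCESS;
}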
diff --git a/testpar/testpar.h b/testpar/testpar.h
index f76de51..b2f0f30 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -22,7 +22,7 @@
#include "h5test.h"
/* Constants definitions */
-#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
/* Define some handy debugging shorthands, routines, ... */
/* debugging tools */
@@ -30,9 +30,9 @@
/* Print message mesg if verbose level is at least medium and
* mesg is not an empty string.
*/
-#define MESG(mesg) \
- if (VERBOSE_MED && *mesg != '\0') \
- HDprintf("%s\n", mesg)
+#define MESG(mesg) \
+ if (VERBOSE_MED && *mesg != '\0') \
+ HDprintf("%s\n", mesg)
/*
* VRFY: Verify if the condition val is true.
@@ -44,60 +44,63 @@
* This will allow program to continue and can be used for debugging.
* (The "do {...} while(0)" is to group all the statements as one unit.)
*/
-#define VRFY_IMPL(val, mesg, rankvar) do { \
- if (val) { \
- MESG(mesg); \
- } \
- else { \
- HDprintf("Proc %d: ", rankvar); \
- HDprintf("*** Parallel ERROR ***\n"); \
- HDprintf(" VRFY (%s) failed at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
- ++nerrors; \
- fflush(stdout); \
- if (!VERBOSE_MED) { \
- HDprintf("aborting MPI processes\n"); \
- MPI_Abort(MPI_COMM_WORLD, 1); \
- } \
- } \
-} while(0)
+#define VRFY_IMPL(val, mesg, rankvar) \
+ do { \
+ if (val) { \
+ MESG(mesg); \
+ } \
+ else { \
+ HDprintf("Proc %d: ", rankvar); \
+ HDprintf("*** Parallel ERROR ***\n"); \
+ HDprintf(" VRFY (%s) failed at line %4d in %s\n", mesg, (int)__LINE__, __FILE__); \
+ ++nerrors; \
+ fflush(stdout); \
+ if (!VERBOSE_MED) { \
+ HDprintf("aborting MPI processes\n"); \
+ MPI_Abort(MPI_COMM_WORLD, 1); \
+ } \
+ } \
+ } while (0)
#define VRFY_G(val, mesg) VRFY_IMPL(val, mesg, mpi_rank_g)
-#define VRFY(val, mesg) VRFY_IMPL(val, mesg, mpi_rank)
+#define VRFY(val, mesg) VRFY_IMPL(val, mesg, mpi_rank)
/*
* Checking for information purpose.
* If val is false, print mesg; else nothing.
* Either case, no error setting.
*/
-#define INFO(val, mesg) do { \
- if (val) { \
- MESG(mesg); \
- } else { \
- HDprintf("Proc %d: ", mpi_rank); \
- HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \
- HDprintf(" Condition (%s) failed at line %4d in %s\n", \
- mesg, (int)__LINE__, __FILE__); \
- fflush(stdout); \
- } \
-} while(0)
+#define INFO(val, mesg) \
+ do { \
+ if (val) { \
+ MESG(mesg); \
+ } \
+ else { \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \
+ HDprintf(" Condition (%s) failed at line %4d in %s\n", mesg, (int)__LINE__, __FILE__); \
+ fflush(stdout); \
+ } \
+ } while (0)
-#define MPI_BANNER(mesg) do { \
- if (VERBOSE_MED || MAINPROCESS){ \
- HDprintf("--------------------------------\n"); \
- HDprintf("Proc %d: ", mpi_rank); \
- HDprintf("*** %s\n", mesg); \
- HDprintf("--------------------------------\n"); \
- } \
-} while(0)
+#define MPI_BANNER(mesg) \
+ do { \
+ if (VERBOSE_MED || MAINPROCESS) { \
+ HDprintf("--------------------------------\n"); \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** %s\n", mesg); \
+ HDprintf("--------------------------------\n"); \
+ } \
+ } while (0)
-#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
+#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
-#define SYNC(comm) do { \
- MPI_BANNER("doing a SYNC"); \
- MPI_Barrier(comm); \
- MPI_BANNER("SYNC DONE"); \
-} while(0)
+#define SYNC(comm) \
+ do { \
+ MPI_BANNER("doing a SYNC"); \
+ MPI_Barrier(comm); \
+ MPI_BANNER("SYNC DONE"); \
+ } while (0)
/* End of Define some handy debugging shorthands, routines, ... */
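The macros above are the error-reporting backbone of every parallel test in this directory: VRFY() counts the failure and aborts all ranks unless verbosity is raised, while MESG(), INFO(), MPI_BANNER(), and SYNC() only report. A hypothetical fragment, not taken from this commit, showing how a test body typically uses them:

/*
 * Hypothetical usage fragment.  It assumes testpar.h (above) and MPI are
 * available and follows the real tests' convention of a global `nerrors'
 * counter and a local `mpi_rank', both of which the macros reference.
 */
#include "mpi.h"
#include "testpar.h"

int nerrors = 0; /* incremented by VRFY() on failure */

void
example_check(MPI_Comm comm)
{
    int    mpi_rank; /* VRFY()/MPI_BANNER() expect this name in scope */
    hid_t  fapl;
    herr_t ret;

    MPI_Comm_rank(comm, &mpi_rank);

    MPI_BANNER("creating an MPI-IO file access property list");

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate(H5P_FILE_ACCESS) succeeded");

    ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
    VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");

    MESG("fapl set up"); /* printed only at VERBOSE_MED and above */

    ret = H5Pclose(fapl);
    VRFY((ret >= 0), "H5Pclose(fapl) succeeded");

    SYNC(comm); /* MPI_Barrier wrapped in banners */
}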
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index b89c790..ca38623 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -18,23 +18,23 @@
#include "testphdf5.h"
#ifndef PATH_MAX
-#define PATH_MAX 512
-#endif /* !PATH_MAX */
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
/* global variables */
int dim0;
int dim1;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
-int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void * old_client_data; /* previous error handler arg.*/
/* other option flags */
@@ -43,13 +43,11 @@ void *old_client_data; /* previous error handler arg.*/
* created in one test is accessed by a different test.
* filenames[0] is reserved as the file name for PARATESTFILE.
*/
-#define NFILENAME 2
+#define NFILENAME 2
#define PARATESTFILE filenames[0]
-const char *FILENAME[NFILENAME]={
- "ParaTest",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
+const char *FILENAME[NFILENAME] = {"ParaTest", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
@@ -58,15 +56,16 @@ hid_t fapl; /* file access property list */
#include <sys/types.h>
#include <sys/stat.h>
-void pause_proc(void)
+void
+pause_proc(void)
{
- int pid;
- h5_stat_t statbuf;
- char greenlight[] = "go";
- int maxloop = 10;
- int loops = 0;
- int time_int = 10;
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
/* mpi variables */
int mpi_size, mpi_rank;
@@ -79,10 +78,10 @@ void pause_proc(void)
MPI_Get_processor_name(mpi_name, &mpi_namelen);
if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
- if (!loops++){
- HDprintf("Proc %d (%*s, %d): to debug, attach %d\n",
- mpi_rank, mpi_namelen, mpi_name, pid, pid);
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
}
HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
HDfflush(stdout);
@@ -92,15 +91,15 @@ void pause_proc(void)
}
/* Use the Profile feature of MPI to call the pause_proc() */
-int MPI_Init(int *argc, char ***argv)
+int
+MPI_Init(int *argc, char ***argv)
{
int ret_code;
- ret_code=PMPI_Init(argc, argv);
+ ret_code = PMPI_Init(argc, argv);
pause_proc();
return (ret_code);
}
-#endif /* USE_PAUSE */
-
+#endif /* USE_PAUSE */
/*
* Show command usage
@@ -109,164 +108,165 @@ static void
usage(void)
{
HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
HDprintf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
+ "\tset number of datasets for the multiple dataset test\n");
HDprintf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
+ "\tset number of groups for the multiple group test\n");
HDprintf("\t-f <prefix>\tfilename prefix\n");
HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
- HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- ROW_FACTOR, COL_FACTOR);
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
HDprintf("\n");
}
-
/*
* parse the command line options
*/
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup default chunk-size. Make sure sizes are > 0 */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
- while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
+ while (--argc) {
+ if (**(++argv) != '-') {
break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'f':
+ if (--argc < 1) {
+ nerrors++;
+ return (1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return (1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: HDprintf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
+ return (1);
}
}
} /*while*/
/* check validity of dimension and chunk sizes */
- if (dim0 <= 0 || dim1 <= 0){
+ if (dim0 <= 0 || dim1 <= 0) {
HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
nerrors++;
- return(1);
+ return (1);
}
- if (chunkdim0 <= 0 || chunkdim1 <= 0){
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
nerrors++;
- return(1);
+ return (1);
}
/* Make sure datasets can be divided into equal portions by the processes */
- if ((dim0 % mpi_size) || (dim1 % mpi_size)){
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
if (MAINPROCESS)
- HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
nerrors++;
- return(1);
+ return (1);
}
/* compose the test filenames */
{
int i, n;
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
+ for (i = 0; i < n; i++)
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], sizeof(filenames[i])) == NULL) {
HDprintf("h5_fixname failed\n");
nerrors++;
- return(1);
+ return (1);
}
HDprintf("Test filenames are:\n");
- for (i=0; i < n; i++)
+ for (i = 0; i < n; i++)
HDprintf(" %s\n", filenames[i]);
}
- return(0);
+ return (0);
}
-
/*
* Create the appropriate File access property list
*/
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
- herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
return (ret_pl);
- if (l_facc_type == FACC_MPIO){
+ if (l_facc_type == FACC_MPIO) {
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
@@ -274,36 +274,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
VRFY((ret >= 0), "");
- return(ret_pl);
+ return (ret_pl);
}
- if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
hid_t mpio_pl;
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((mpio_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
VRFY((ret >= 0), "");
/* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((ret_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
H5Pclose(mpio_pl);
- return(ret_pl);
+ return (ret_pl);
}
/* unknown file access types */
return (ret_pl);
}
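As a usage illustration for create_faccess_plist() above, the sketch below builds an MPI-IO access list, creates a file collectively, and releases both handles. The file name is invented, error handling is reduced to asserts, and MPI is assumed to be initialized already.

#include <assert.h>
#include "hdf5.h"
#include "mpi.h"

/* Assumes it is compiled with this test, which supplies
 * create_faccess_plist() and the FACC_MPIO flag. */
static void
open_with_mpio(void)
{
    hid_t acc_pl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    hid_t fid;

    assert(acc_pl >= 0);

    /* every rank participates in the collective file creation */
    fid = H5Fcreate("example_parallel.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_pl);
    assert(fid >= 0);

    assert(H5Pclose(acc_pl) >= 0);
    assert(H5Fclose(fid) >= 0);
}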
-
-int main(int argc, char **argv)
+int
+main(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
H5Ptest_param_t ndsets_params, ngroups_params;
H5Ptest_param_t collngroups_params;
H5Ptest_param_t io_mode_confusion_params;
@@ -319,21 +319,21 @@ int main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- dim0 = ROW_FACTOR*mpi_size;
- dim1 = COL_FACTOR*mpi_size;
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
- if (MAINPROCESS){
+ if (MAINPROCESS) {
HDprintf("===================================\n");
HDprintf("PHDF5 TESTS START\n");
HDprintf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hang in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
- if (H5dont_atexit() < 0){
+ * happen during the test and the process is aborted, it will not get
+ * hang in the atexit post processing in which it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
+ if (H5dont_atexit() < 0) {
HDprintf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
@@ -343,209 +343,149 @@ int main(int argc, char **argv)
TestInit(argv[0], usage, parse_options);
/* Tests are generally arranged from least to most complexity... */
- AddTest("mpiodup", test_fapl_mpio_dup, NULL,
- "fapl_mpio duplicate", NULL);
+ AddTest("mpiodup", test_fapl_mpio_dup, NULL, "fapl_mpio duplicate", NULL);
- AddTest("split", test_split_comm_access, NULL,
- "dataset using split communicators", PARATESTFILE);
+ AddTest("split", test_split_comm_access, NULL, "dataset using split communicators", PARATESTFILE);
#ifdef PB_OUT /* temporary: disable page buffering when parallel */
- AddTest("page_buffer", test_page_buffer_access, NULL,
- "page buffer usage in parallel", PARATESTFILE);
+ AddTest("page_buffer", test_page_buffer_access, NULL, "page buffer usage in parallel", PARATESTFILE);
#endif
- AddTest("props", test_file_properties, NULL,
- "Coll Metadata file property settings", PARATESTFILE);
-
- AddTest("idsetw", dataset_writeInd, NULL,
- "dataset independent write", PARATESTFILE);
- AddTest("idsetr", dataset_readInd, NULL,
- "dataset independent read", PARATESTFILE);
-
- AddTest("cdsetw", dataset_writeAll, NULL,
- "dataset collective write", PARATESTFILE);
- AddTest("cdsetr", dataset_readAll, NULL,
- "dataset collective read", PARATESTFILE);
-
- AddTest("eidsetw", extend_writeInd, NULL,
- "extendible dataset independent write", PARATESTFILE);
- AddTest("eidsetr", extend_readInd, NULL,
- "extendible dataset independent read", PARATESTFILE);
- AddTest("ecdsetw", extend_writeAll, NULL,
- "extendible dataset collective write", PARATESTFILE);
- AddTest("ecdsetr", extend_readAll, NULL,
- "extendible dataset collective read", PARATESTFILE);
- AddTest("eidsetw2", extend_writeInd2, NULL,
- "extendible dataset independent write #2", PARATESTFILE);
- AddTest("selnone", none_selection_chunk, NULL,
- "chunked dataset with none-selection", PARATESTFILE);
- AddTest("calloc", test_chunk_alloc, NULL,
- "parallel extend Chunked allocation on serial file", PARATESTFILE);
- AddTest("fltread", test_filter_read, NULL,
- "parallel read of dataset written serially with filters", PARATESTFILE);
+ AddTest("props", test_file_properties, NULL, "Coll Metadata file property settings", PARATESTFILE);
+
+ AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE);
+ AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE);
+
+ AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE);
+ AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE);
+
+ AddTest("eidsetw", extend_writeInd, NULL, "extendible dataset independent write", PARATESTFILE);
+ AddTest("eidsetr", extend_readInd, NULL, "extendible dataset independent read", PARATESTFILE);
+ AddTest("ecdsetw", extend_writeAll, NULL, "extendible dataset collective write", PARATESTFILE);
+ AddTest("ecdsetr", extend_readAll, NULL, "extendible dataset collective read", PARATESTFILE);
+ AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE);
+ AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE);
+ AddTest("calloc", test_chunk_alloc, NULL, "parallel extend Chunked allocation on serial file",
+ PARATESTFILE);
+ AddTest("fltread", test_filter_read, NULL, "parallel read of dataset written serially with filters",
+ PARATESTFILE);
#ifdef H5_HAVE_FILTER_DEFLATE
- AddTest("cmpdsetr", compress_readAll, NULL,
- "compressed dataset collective read", PARATESTFILE);
+ AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */
- AddTest("zerodsetr", zero_dim_dset, NULL,
- "zero dim dset", PARATESTFILE);
+ AddTest("zerodsetr", zero_dim_dset, NULL, "zero dim dset", PARATESTFILE);
- ndsets_params.name = PARATESTFILE;
+ ndsets_params.name = PARATESTFILE;
ndsets_params.count = ndatasets;
- AddTest("ndsetw", multiple_dset_write, NULL,
- "multiple datasets write", &ndsets_params);
+ AddTest("ndsetw", multiple_dset_write, NULL, "multiple datasets write", &ndsets_params);
- ngroups_params.name = PARATESTFILE;
+ ngroups_params.name = PARATESTFILE;
ngroups_params.count = ngroups;
- AddTest("ngrpw", multiple_group_write, NULL,
- "multiple groups write", &ngroups_params);
- AddTest("ngrpr", multiple_group_read, NULL,
- "multiple groups read", &ngroups_params);
+ AddTest("ngrpw", multiple_group_write, NULL, "multiple groups write", &ngroups_params);
+ AddTest("ngrpr", multiple_group_read, NULL, "multiple groups read", &ngroups_params);
- AddTest("compact", compact_dataset, NULL,
- "compact dataset test", PARATESTFILE);
+ AddTest("compact", compact_dataset, NULL, "compact dataset test", PARATESTFILE);
- collngroups_params.name = PARATESTFILE;
+ collngroups_params.name = PARATESTFILE;
collngroups_params.count = ngroups;
/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL,
- "collective grp/dset write - independent grp/dset read",
- &collngroups_params);
+ "collective grp/dset write - independent grp/dset read", &collngroups_params);
#ifndef H5_HAVE_WIN32_API
- AddTest("bigdset", big_dataset, NULL,
- "big dataset test", PARATESTFILE);
+ AddTest("bigdset", big_dataset, NULL, "big dataset test", PARATESTFILE);
#else
HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif
- AddTest("fill", dataset_fillvalue, NULL,
- "dataset fill value", PARATESTFILE);
-
- AddTest("cchunk1",
- coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
- AddTest("cchunk2",
- coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
- AddTest("cchunk3",
- coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
- AddTest("cchunk4",
- coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
-
- if((mpi_size < 3)&& MAINPROCESS ) {
+ AddTest("fill", dataset_fillvalue, NULL, "dataset fill value", PARATESTFILE);
+
+ AddTest("cchunk1", coll_chunk1, NULL, "simple collective chunk io", PARATESTFILE);
+ AddTest("cchunk2", coll_chunk2, NULL, "noncontiguous collective chunk io", PARATESTFILE);
+ AddTest("cchunk3", coll_chunk3, NULL, "multi-chunk collective chunk io", PARATESTFILE);
+ AddTest("cchunk4", coll_chunk4, NULL, "collective chunk io with partial non-selection ", PARATESTFILE);
+
+ if ((mpi_size < 3) && MAINPROCESS) {
HDprintf("Collective chunk IO optimization APIs ");
HDprintf("needs at least 3 processes to participate\n");
HDprintf("Collective chunk IO API tests will be skipped \n");
}
- AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
- coll_chunk5,NULL,
- "linked chunk collective IO without optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
- coll_chunk6,NULL,
- "multi-chunk collective IO with direct request",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
- coll_chunk7,NULL,
- "linked chunk collective IO with optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
- coll_chunk8,NULL,
- "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
- coll_chunk9,NULL,
- "multiple chunk collective IO with optimization",PARATESTFILE);
- AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
- coll_chunk10,NULL,
- "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
-
-
+ AddTest((mpi_size < 3) ? "-cchunk5" : "cchunk5", coll_chunk5, NULL,
+ "linked chunk collective IO without optimization", PARATESTFILE);
+ AddTest((mpi_size < 3) ? "-cchunk6" : "cchunk6", coll_chunk6, NULL,
+ "multi-chunk collective IO with direct request", PARATESTFILE);
+ AddTest((mpi_size < 3) ? "-cchunk7" : "cchunk7", coll_chunk7, NULL,
+ "linked chunk collective IO with optimization", PARATESTFILE);
+ AddTest((mpi_size < 3) ? "-cchunk8" : "cchunk8", coll_chunk8, NULL,
+ "linked chunk collective IO transferring to multi-chunk", PARATESTFILE);
+ AddTest((mpi_size < 3) ? "-cchunk9" : "cchunk9", coll_chunk9, NULL,
+ "multiple chunk collective IO with optimization", PARATESTFILE);
+ AddTest((mpi_size < 3) ? "-cchunk10" : "cchunk10", coll_chunk10, NULL,
+ "multiple chunk collective IO transferring to independent IO", PARATESTFILE);
/* irregular collective IO tests*/
- AddTest("ccontw",
- coll_irregular_cont_write,NULL,
- "collective irregular contiguous write",PARATESTFILE);
- AddTest("ccontr",
- coll_irregular_cont_read,NULL,
- "collective irregular contiguous read",PARATESTFILE);
- AddTest("cschunkw",
- coll_irregular_simple_chunk_write,NULL,
- "collective irregular simple chunk write",PARATESTFILE);
- AddTest("cschunkr",
- coll_irregular_simple_chunk_read,NULL,
- "collective irregular simple chunk read",PARATESTFILE);
- AddTest("ccchunkw",
- coll_irregular_complex_chunk_write,NULL,
- "collective irregular complex chunk write",PARATESTFILE);
- AddTest("ccchunkr",
- coll_irregular_complex_chunk_read,NULL,
- "collective irregular complex chunk read",PARATESTFILE);
-
- AddTest("null", null_dataset, NULL,
- "null dataset test", PARATESTFILE);
+ AddTest("ccontw", coll_irregular_cont_write, NULL, "collective irregular contiguous write", PARATESTFILE);
+ AddTest("ccontr", coll_irregular_cont_read, NULL, "collective irregular contiguous read", PARATESTFILE);
+ AddTest("cschunkw", coll_irregular_simple_chunk_write, NULL, "collective irregular simple chunk write",
+ PARATESTFILE);
+ AddTest("cschunkr", coll_irregular_simple_chunk_read, NULL, "collective irregular simple chunk read",
+ PARATESTFILE);
+ AddTest("ccchunkw", coll_irregular_complex_chunk_write, NULL, "collective irregular complex chunk write",
+ PARATESTFILE);
+ AddTest("ccchunkr", coll_irregular_complex_chunk_read, NULL, "collective irregular complex chunk read",
+ PARATESTFILE);
+
+ AddTest("null", null_dataset, NULL, "null dataset test", PARATESTFILE);
io_mode_confusion_params.name = PARATESTFILE;
io_mode_confusion_params.count = 0; /* value not used */
- AddTest("I/Omodeconf", io_mode_confusion, NULL,
- "I/O mode confusion test -- hangs quickly on failure",
+ AddTest("I/Omodeconf", io_mode_confusion, NULL, "I/O mode confusion test -- hangs quickly on failure",
&io_mode_confusion_params);
- if((mpi_size < 3) && MAINPROCESS) {
+ if ((mpi_size < 3) && MAINPROCESS) {
HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n");
}
- if(mpi_size > 2) {
- rr_obj_flush_confusion_params.name = PARATESTFILE;
+ if (mpi_size > 2) {
+ rr_obj_flush_confusion_params.name = PARATESTFILE;
rr_obj_flush_confusion_params.count = 0; /* value not used */
AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
- "round robin object header flush confusion test",
- &rr_obj_flush_confusion_params);
+ "round robin object header flush confusion test", &rr_obj_flush_confusion_params);
}
- AddTest("alnbg1",
- chunk_align_bug_1, NULL,
- "Chunk allocation with alignment bug.",
- PARATESTFILE);
+ AddTest("alnbg1", chunk_align_bug_1, NULL, "Chunk allocation with alignment bug.", PARATESTFILE);
- AddTest("tldsc",
- lower_dim_size_comp_test, NULL,
- "test lower dim size comp in span tree to mpi derived type",
- PARATESTFILE);
+ AddTest("tldsc", lower_dim_size_comp_test, NULL,
+ "test lower dim size comp in span tree to mpi derived type", PARATESTFILE);
- AddTest("lccio",
- link_chunk_collective_io_test, NULL,
- "test mpi derived type management",
- PARATESTFILE);
+ AddTest("lccio", link_chunk_collective_io_test, NULL, "test mpi derived type management", PARATESTFILE);
- AddTest("actualio", actual_io_mode_tests, NULL,
- "test actual io mode proprerty",
- PARATESTFILE);
+ AddTest("actualio", actual_io_mode_tests, NULL, "test actual io mode property", PARATESTFILE);
- AddTest("nocolcause", no_collective_cause_tests, NULL,
- "test cause for broken collective io",
+ AddTest("nocolcause", no_collective_cause_tests, NULL, "test cause for broken collective io",
PARATESTFILE);
- AddTest("edpl", test_plist_ed, NULL,
- "encode/decode Property Lists", NULL);
+ AddTest("edpl", test_plist_ed, NULL, "encode/decode Property Lists", NULL);
- if((mpi_size < 2) && MAINPROCESS) {
+ if ((mpi_size < 2) && MAINPROCESS) {
HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n");
HDprintf("File Image Ops daisy chain test will be skipped \n");
}
- AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
+ AddTest((mpi_size < 2) ? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
"file image ops daisy chain", NULL);
- if((mpi_size < 2)&& MAINPROCESS ) {
+ if ((mpi_size < 2) && MAINPROCESS) {
HDprintf("Atomicity tests need at least 2 processes to participate\n");
HDprintf("8 is more recommended.. Atomicity tests will be skipped \n");
}
else if (facc_type != FACC_MPIO && MAINPROCESS) {
HDprintf("Atomicity tests will not work with a non MPIO VFD\n");
}
- else if(mpi_size >= 2 && facc_type == FACC_MPIO){
- AddTest("atomicity", dataset_atomicity, NULL,
- "dataset atomic updates", PARATESTFILE);
+ else if (mpi_size >= 2 && facc_type == FACC_MPIO) {
+ AddTest("atomicity", dataset_atomicity, NULL, "dataset atomic updates", PARATESTFILE);
}
- AddTest("denseattr", test_dense_attr, NULL,
- "Store Dense Attributes", PARATESTFILE);
+ AddTest("denseattr", test_dense_attr, NULL, "Store Dense Attributes", PARATESTFILE);
AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
"Collective Metadata read with some ranks having no selection", PARATESTFILE);
@@ -558,25 +498,24 @@ int main(int argc, char **argv)
TestInfo(argv[0]);
/* setup file access property list */
- fapl = H5Pcreate (H5P_FILE_ACCESS);
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* Parse command line arguments */
TestParseCmdLine(argc, argv);
- if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
HDprintf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
}
-
/* Perform requested testing */
PerformTests();
/* make sure all processes are finished before final report, cleanup
- * and exit.
- */
+ * and exit.
+ */
MPI_Barrier(MPI_COMM_WORLD);
/* Display test summary, if requested */
@@ -592,10 +531,10 @@ int main(int argc, char **argv)
{
int temp;
MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors=temp;
+ nerrors = temp;
}
- if (MAINPROCESS){ /* only process 0 reports */
+ if (MAINPROCESS) { /* only process 0 reports */
HDprintf("===================================\n");
if (nerrors)
HDprintf("***PHDF5 tests detected %d errors***\n", nerrors);
@@ -614,6 +553,5 @@ int main(int argc, char **argv)
MPI_Finalize();
/* cannot just return (nerrors) because exit code is limited to 1byte */
- return(nerrors!=0);
+ return (nerrors != 0);
}
-
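The end of main() above folds the per-rank error counts into one result with MPI_Allreduce(MPI_MAX), so every rank reports the same total, and the exit status is collapsed to 0/1 because only the low byte of an exit code is portable. The same pattern in isolation, as a sketch with illustrative names rather than test code:

#include <mpi.h>

int
main(int argc, char **argv)
{
    int local_errors  = 0; /* incremented by failing checks in a real test */
    int global_errors = 0;

    MPI_Init(&argc, &argv);

    /* make every rank agree on the worst error count seen anywhere */
    MPI_Allreduce(&local_errors, &global_errors, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    MPI_Finalize();

    /* exit status is truncated to one byte, so report only pass/fail */
    return (global_errors != 0);
}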
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index b086366..a821e6f 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -18,165 +18,165 @@
#include "testpar.h"
-enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
- API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
- API_MULTI_COLL,API_MULTI_IND};
+enum H5TEST_COLL_CHUNK_API {
+ API_NONE = 0,
+ API_LINK_HARD,
+ API_MULTI_HARD,
+ API_LINK_TRUE,
+ API_LINK_FALSE,
+ API_MULTI_COLL,
+ API_MULTI_IND
+};
#ifndef FALSE
-#define FALSE 0
+#define FALSE 0
#endif
#ifndef TRUE
-#define TRUE 1
+#define TRUE 1
#endif
-
/* Constants definitions */
-#define DIM0 600 /* Default dataset sizes. */
-#define DIM1 1200 /* Values are from a monitor pixel sizes */
-#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
-#define COL_FACTOR 16 /* Nominal column factor for dataset size */
-#define RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-#define DATASETNAME4 "Data4"
-#define DATASETNAME5 "Data5"
-#define DATASETNAME6 "Data6"
-#define DATASETNAME7 "Data7"
-#define DATASETNAME8 "Data8"
-#define DATASETNAME9 "Data9"
+#define DIM0 600 /* Default dataset sizes. */
+#define DIM1 1200 /* Values are based on monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
+#define RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
/* point selection order */
-#define IN_ORDER 1
+#define IN_ORDER 1
#define OUT_OF_ORDER 2
/* Hyperslab layout styles */
-#define BYROW 1 /* divide into slabs of rows */
-#define BYCOL 2 /* divide into blocks of columns */
-#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */
-#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */
+#define BYROW 1 /* divide into slabs of rows */
+#define BYCOL 2 /* divide into blocks of columns */
+#define ZROW 3 /* same as BYROW except process 0 gets 0 rows */
+#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */
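BYROW and BYCOL describe how a dim0 x dim1 dataset is carved up among the mpi_size ranks; ZROW and ZCOL are the same divisions except that rank 0 receives an empty selection. Below is a rough sketch of the BYROW arithmetic, a simplified stand-in for the test suite's own slab-setting helper rather than a copy of it.

#include "hdf5.h"

/* BYROW: each rank gets a contiguous band of rows covering the full width.
 * Assumes dim0 is a multiple of mpi_size, which parse_options() enforces. */
static void
byrow_slab(int mpi_rank, int mpi_size, hsize_t dim0, hsize_t dim1, hsize_t start[2], hsize_t count[2])
{
    count[0] = dim0 / (hsize_t)mpi_size; /* rows owned by this rank */
    count[1] = dim1;                     /* full width */
    start[0] = (hsize_t)mpi_rank * count[0];
    start[1] = 0;
    /* the selection is then applied with:
     * H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, NULL); */
}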
/* File_Access_type bits */
-#define FACC_DEFAULT 0x0 /* default */
-#define FACC_MPIO 0x1 /* MPIO */
-#define FACC_SPLIT 0x2 /* Split File */
+#define FACC_DEFAULT 0x0 /* default */
+#define FACC_MPIO 0x1 /* MPIO */
+#define FACC_SPLIT 0x2 /* Split File */
-#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
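DXFER_COLLECTIVE_IO and DXFER_INDEPENDENT_IO record which transfer mode the tests should request; the independent mode replaces collective transfers while keeping the collective file set view, as the banner printed in main() states. A hedged sketch of turning such a flag into a dataset transfer property list (the helper name here is invented):

#include "hdf5.h"

/* Illustrative only: map a DXFER_* flag onto an HDF5 transfer property list. */
static hid_t
make_xfer_plist(int dxfer_type)
{
    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    if (xfer_plist < 0)
        return -1;
    if (dxfer_type == DXFER_COLLECTIVE_IO)
        H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    else
        H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
    return xfer_plist; /* caller passes it to H5Dread/H5Dwrite and closes it */
}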
/*Constants for collective chunk definitions */
-#define SPACE_DIM1 24
-#define SPACE_DIM2 4
-#define BYROW_CONT 1
-#define BYROW_DISCONT 2
-#define BYROW_SELECTNONE 3
+#define SPACE_DIM1 24
+#define SPACE_DIM2 4
+#define BYROW_CONT 1
+#define BYROW_DISCONT 2
+#define BYROW_SELECTNONE 3
#define BYROW_SELECTUNBALANCE 4
-#define BYROW_SELECTINCHUNK 5
-
-#define DIMO_NUM_CHUNK 4
-#define DIM1_NUM_CHUNK 2
-#define LINK_TRUE_NUM_CHUNK 2
-#define LINK_FALSE_NUM_CHUNK 6
-#define MULTI_TRUE_PERCENT 50
-#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
+#define BYROW_SELECTINCHUNK 5
+
+#define DIMO_NUM_CHUNK 4
+#define DIM1_NUM_CHUNK 2
+#define LINK_TRUE_NUM_CHUNK 2
+#define LINK_FALSE_NUM_CHUNK 6
+#define MULTI_TRUE_PERCENT 50
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
-#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
+#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp"
#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
-
/*Constants for MPI derived data type generated from span tree */
-#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
-#define MSPACE1_DIM 27000 /* Dataset size in memory */
-#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
-#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
-#define FSPACE_DIM2 3600
+#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
+#define MSPACE1_DIM 27000 /* Dataset size in memory */
+#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
+#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
+#define FSPACE_DIM2 3600
/* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 9
-#define MSPACE_DIM2 3600
-#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
-#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
-#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
-#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
-
-#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
-#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
-#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
-#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
-#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-
-
-
-#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
-#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define NPOINTS 4 /* Number of points that will be selected
- and overwritten */
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 9
+#define MSPACE_DIM2 3600
+#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
+#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
+#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
+#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
+
+#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
+#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
+#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
+#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
+#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+
+#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
+#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define NPOINTS \
+ 4 /* Number of points that will be selected \
+ and overwritten */
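Each of the FH*/SH*/MH*/R* groups above is one (start, stride, count, block) quadruple for H5Sselect_hyperslab; for example, the FH* values describe a hyperslab in the FSPACE_DIM1 x FSPACE_DIM2 file space. A minimal sketch of applying that quadruple, assuming this header's macros and ignoring return values for brevity:

#include "hdf5.h"

static void
select_first_file_hyperslab(void)
{
    hsize_t fdims[FSPACE_RANK]  = {FSPACE_DIM1, FSPACE_DIM2};
    hsize_t start[FSPACE_RANK]  = {FHSTART0, FHSTART1};
    hsize_t stride[FSPACE_RANK] = {FHSTRIDE0, FHSTRIDE1};
    hsize_t count[FSPACE_RANK]  = {FHCOUNT0, FHCOUNT1};
    hsize_t block[FSPACE_RANK]  = {FHBLOCK0, FHBLOCK1};

    hid_t fspace = H5Screate_simple(FSPACE_RANK, fdims, NULL);

    /* dim 0: one block of 3 rows starting at row 0;
     * dim 1: 768 blocks of 2 columns, stride 3, starting at column 1 */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
    H5Sclose(fspace);
}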
/* Definitions of the selection mode for the test_actual_io_function. */
-#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
-#define TEST_ACTUAL_IO_RESET 1
-#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
-#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
-#define TEST_ACTUAL_IO_LINK_CHUNK 8
-#define TEST_ACTUAL_IO_CONTIGUOUS 9
+#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
+#define TEST_ACTUAL_IO_RESET 1
+#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
+#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
+#define TEST_ACTUAL_IO_LINK_CHUNK 8
+#define TEST_ACTUAL_IO_CONTIGUOUS 9
/* Definitions of the selection mode for the no_collective_cause_tests function. */
#define TEST_COLLECTIVE 0x001
@@ -189,7 +189,7 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define TEST_FILTERS 0x080
/* TEST_FILTERS will take place of this after supporting mpio + filter for
* H5Dcreate and H5Dwrite */
-#define TEST_FILTERS_READ 0x100
+#define TEST_FILTERS_READ 0x100
/* Don't erase these lines, they are put here for debugging purposes */
/*
@@ -206,12 +206,11 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define NPOINTS 4
*/ /* end of debugging macro */
-
/* type definitions */
-typedef struct H5Ptest_param_t /* holds extra test parameters */
+typedef struct H5Ptest_param_t /* holds extra test parameters */
{
- char *name;
- int count;
+ char *name;
+ int count;
} H5Ptest_param_t;
/* Dataset data type. Int's can be easily octo dumped. */
@@ -219,20 +218,20 @@ typedef int DATATYPE;
/* Shape Same Tests Definitions */
typedef enum {
- IND_CONTIG, /* Independent IO on contigous datasets */
- COL_CONTIG, /* Collective IO on contigous datasets */
- IND_CHUNKED, /* Independent IO on chunked datasets */
- COL_CHUNKED /* Collective IO on chunked datasets */
+ IND_CONTIG, /* Independent IO on contiguous datasets */
+ COL_CONTIG, /* Collective IO on contiguous datasets */
+ IND_CHUNKED, /* Independent IO on chunked datasets */
+ COL_CHUNKED /* Collective IO on chunked datasets */
} ShapeSameTestMethods;
/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void *old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
-extern int dxfer_coll_type;
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern H5E_auto2_t old_func; /* previous error handler */
+extern void * old_client_data; /*previous error handler arg.*/
+extern int facc_type; /*Test file access type */
+extern int dxfer_coll_type;
/* Test program prototypes */
void test_plist_ed(void);
@@ -301,10 +300,10 @@ void test_multi_chunk_io_addrmap_issue(void);
void test_link_chunk_io_sort_chunk_issue(void);
/* commonly used prototypes */
-hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
+hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
-int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
- hsize_t block[], DATATYPE *dataset, DATATYPE *original);
-void point_set (hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
- size_t num_points, hsize_t coords[], int order);
+int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original);
+void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order);
#endif /* PHDF5TEST_H */
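For the point-selection helpers declared above: point_set() presumably expands a (start, stride, count, block) pattern into NPOINTS explicit coordinates in the requested order (IN_ORDER or OUT_OF_ORDER); however they are produced, the coordinates are applied with H5Sselect_elements. A hedged sketch with arbitrarily chosen points:

#include "hdf5.h"

/* Assumes this header's NPOINTS and RANK macros; return value ignored for brevity. */
static void
select_four_points(hid_t file_space)
{
    /* NPOINTS row/column pairs, flattened as H5Sselect_elements expects;
     * the tests build this array with point_set() instead of hard-coding it. */
    hsize_t coords[NPOINTS * RANK] = {0, 0, 1, 2, 2, 4, 3, 6};

    H5Sselect_elements(file_space, H5S_SELECT_SET, (size_t)NPOINTS, coords);
}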