Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeLists.txt    |   8
-rw-r--r--  testpar/Makefile.am       |   2
-rw-r--r--  testpar/Makefile.in       |  64
-rw-r--r--  testpar/t_bigio.c         | 531
-rw-r--r--  testpar/t_cache.c         |  23
-rw-r--r--  testpar/t_coll_chunk.c    |   2
-rw-r--r--  testpar/t_coll_md_read.c  |   2
-rw-r--r--  testpar/t_dset.c          |   2
-rw-r--r--  testpar/t_file_image.c    |  62
-rw-r--r--  testpar/t_init_term.c     |   4
-rw-r--r--  testpar/t_mdset.c         |   8
-rw-r--r--  testpar/t_mpi.c           |   8
-rw-r--r--  testpar/t_pflush2.c       |   4
-rw-r--r--  testpar/t_pread.c         |  24
-rw-r--r--  testpar/t_prestart.c      |   8
-rw-r--r--  testpar/t_prop.c          |  10
-rw-r--r--  testpar/t_pshutdown.c     |   8
-rw-r--r--  testpar/t_shapesame.c     | 120
-rw-r--r--  testpar/t_span_tree.c     |  12
-rw-r--r--  testpar/testpar.h         |   9
20 files changed, 463 insertions, 448 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 51c3420..dba9e68 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.10)
+cmake_minimum_required (VERSION 3.12)
project (HDF5_TEST_PAR C)
#-----------------------------------------------------------------------------
@@ -22,6 +22,7 @@ set (testphdf5_SOURCES
#-- Adding test for testhdf5
add_executable (testphdf5 ${testphdf5_SOURCES})
+target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}")
target_include_directories (testphdf5
PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
)
@@ -40,6 +41,7 @@ set_target_properties (testphdf5 PROPERTIES FOLDER test/par)
MACRO (ADD_H5P_EXE file)
add_executable (${file} ${HDF5_TEST_PAR_SOURCE_DIR}/${file}.c)
+ target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}")
target_include_directories (${file}
PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
)
@@ -78,4 +80,6 @@ foreach (h5_testp ${H5P_TESTS})
ADD_H5P_EXE(${h5_testp})
endforeach ()
-include (CMakeTests.cmake)
+if (HDF5_TEST_PARALLEL)
+ include (CMakeTests.cmake)
+endif ()
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index a11099d..918bec8 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -24,7 +24,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
# Test scripts--
# testpflush.sh:
TEST_SCRIPT_PARA = testpflush.sh
-SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
check_SCRIPTS = $(TEST_SCRIPT_PARA)
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index 05819f4..79467d9 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -473,13 +473,13 @@ AMTAR = @AMTAR@
# H5_CFLAGS holds flags that should be used when building hdf5,
# but which should not be exported to h5cc for building other programs.
-# AM_CFLAGS is an automake construct which should be used by Makefiles
+# AM_CFLAGS is an automake construct which should be used by Makefiles
# instead of CFLAGS, as CFLAGS is reserved solely for the user to define.
# This applies to FCFLAGS, CXXFLAGS, CPPFLAGS, and LDFLAGS as well.
-AM_CFLAGS = @AM_CFLAGS@ @H5_CFLAGS@
+AM_CFLAGS = @AM_CFLAGS@ @H5_CFLAGS@ @H5_ECFLAGS@
AM_CPPFLAGS = @AM_CPPFLAGS@ @H5_CPPFLAGS@ -I$(top_srcdir)/src \
-I$(top_srcdir)/test
-AM_CXXFLAGS = @AM_CXXFLAGS@ @H5_CXXFLAGS@
+AM_CXXFLAGS = @AM_CXXFLAGS@ @H5_CXXFLAGS@ @H5_ECXXFLAGS@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AM_FCFLAGS = @AM_FCFLAGS@ @H5_FCFLAGS@
AM_JAVACFLAGS = @AM_JAVACFLAGS@
@@ -499,6 +499,7 @@ CC = @CC@
CCDEPMODE = @CCDEPMODE@
CC_VERSION = @CC_VERSION@
CFLAGS = @CFLAGS@
+CLANG_SANITIZE_CHECKS = @CLANG_SANITIZE_CHECKS@
CODESTACK = @CODESTACK@
CONFIG_DATE = @CONFIG_DATE@
CONFIG_MODE = @CONFIG_MODE@
@@ -515,6 +516,7 @@ DEFAULT_API_VERSION = @DEFAULT_API_VERSION@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DEPRECATED_SYMBOLS = @DEPRECATED_SYMBOLS@
+DESIRED_FILE_LOCKING = @DESIRED_FILE_LOCKING@
DEV_WARNINGS = @DEV_WARNINGS@
DIRECT_VFD = @DIRECT_VFD@
DLLTOOL = @DLLTOOL@
@@ -551,8 +553,11 @@ H5_CFLAGS = @H5_CFLAGS@
H5_CLASSPATH = @H5_CLASSPATH@
H5_CPPFLAGS = @H5_CPPFLAGS@
H5_CXXFLAGS = @H5_CXXFLAGS@
+H5_ECFLAGS = @H5_ECFLAGS@
+H5_ECXXFLAGS = @H5_ECXXFLAGS@
H5_FCFLAGS = @H5_FCFLAGS@
H5_FORTRAN_SHARED = @H5_FORTRAN_SHARED@
+H5_IS_DARWIN = @H5_IS_DARWIN@
H5_JAVACFLAGS = @H5_JAVACFLAGS@
H5_JAVAFLAGS = @H5_JAVAFLAGS@
H5_JNIFLAGS = @H5_JNIFLAGS@
@@ -575,6 +580,7 @@ HL = @HL@
HL_FOR = @HL_FOR@
HSIZE_T = @HSIZE_T@
HSSIZE_T = @HSSIZE_T@
+IGNORE_DISABLED_FILE_LOCKS = @IGNORE_DISABLED_FILE_LOCKS@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
@@ -611,6 +617,7 @@ MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MEMORYALLOCSANITYCHECK = @MEMORYALLOCSANITYCHECK@
+MIRROR_VFD = @MIRROR_VFD@
MKDIR_P = @MKDIR_P@
MPE = @MPE@
NM = @NM@
@@ -670,6 +677,7 @@ TIME = @TIME@
TR = @TR@
TRACE_API = @TRACE_API@
UNAME_INFO = @UNAME_INFO@
+USE_FILE_LOCKING = @USE_FILE_LOCKING@
USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@
USE_FILTER_SZIP = @USE_FILTER_SZIP@
USINGMEMCHECKER = @USINGMEMCHECKER@
@@ -705,6 +713,7 @@ docdir = $(exec_prefix)/doc
dvidir = @dvidir@
enable_shared = @enable_shared@
enable_static = @enable_static@
+examplesdir = @examplesdir@
exec_prefix = @exec_prefix@
fortran_linux_linker_option = @fortran_linux_linker_option@
host = @host@
@@ -761,15 +770,15 @@ LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la
LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la
LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la
-# Note that in svn revision 19400 the '/' after DESTDIR in H5* variables below
-# has been removed. According to the official description of DESTDIR by Gnu at
-# http://www.gnu.org/prep/standards/html_node/DESTDIR.html, DESTDIR is
-# prepended to the normal and complete install path that it precedes for the
-# purpose of installing in a temporary directory which is useful for building
-# rpms and other packages. The '/' after ${DESTDIR} will be followed by another
-# '/' at the beginning of the normal install path. When DESTDIR is empty the
-# path then begins with '//', which is incorrect and causes problems at least for
-# Cygwin.
+# Note that in svn revision 19400 the '/' after DESTDIR in H5* variables below
+# has been removed. According to the official description of DESTDIR by Gnu at
+# http://www.gnu.org/prep/standards/html_node/DESTDIR.html, DESTDIR is
+# prepended to the normal and complete install path that it precedes for the
+# purpose of installing in a temporary directory which is useful for building
+# rpms and other packages. The '/' after ${DESTDIR} will be followed by another
+# '/' at the beginning of the normal install path. When DESTDIR is empty the
+# path then begins with '//', which is incorrect and causes problems at least for
+# Cygwin.
# Scripts used to build examples
# If only shared libraries have been installed, have h5cc build examples with
@@ -804,7 +813,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog *.clog2 MPItest.h5 \
# Test scripts--
# testpflush.sh:
TEST_SCRIPT_PARA = testpflush.sh
-SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
check_SCRIPTS = $(TEST_SCRIPT_PARA)
# Test programs. These are our main targets.
@@ -829,7 +838,7 @@ LIB = $(lib_LIBRARIES) $(lib_LTLIBRARIES) $(noinst_LIBRARIES) \
PROGS = $(bin_PROGRAMS) $(bin_SCRIPTS) $(noinst_PROGRAMS) $(noinst_SCRIPTS) \
$(EXTRA_PROG)
-chk_TESTS = $(check_PROGRAMS) $(check_SCRIPTS) $(EXTRA_TEST)
+chk_TESTS = $(check_PROGRAMS) $(check_SCRIPTS) $(EXTRA_TEST)
TEST_EXTENSIONS = .sh
SH_LOG_COMPILER = $(SHELL)
AM_SH_LOG_FLAGS =
@@ -1457,28 +1466,37 @@ $(TEST_PROG_CHKEXE) $(TEST_PROG_PARA_CHKEXE) dummy.chkexe_:
echo "============================" > $${log}; \
fi; \
if test "X$(FORTRAN_API)" = "Xyes"; then \
- echo "Fortran API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
+ echo "Fortran API: Testing $${tname} $(TEST_FLAGS)"; \
if test -n "$(REALTIMEOUTPUT)"; then \
- echo "Fortran API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" | tee -a $${log}; \
+ echo "Fortran API: Test log for $${tname} $(TEST_FLAGS)" | tee -a $${log}; \
else \
- echo "Fortran API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
+ echo "Fortran API: Test log for $${tname} $(TEST_FLAGS)" >> $${log}; \
fi; \
elif test "X$(CXX_API)" = "Xyes"; then \
- echo "C++ API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
+ echo "C++ API: Testing $${tname} $(TEST_FLAGS)"; \
if test -n "$(REALTIMEOUTPUT)"; then \
- echo "C++ API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" | tee -a $${log};\
+ echo "C++ API: Test log for $${tname} $(TEST_FLAGS)" | tee -a $${log};\
else \
- echo "C++ API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log};\
+ echo "C++ API: Test log for $${tname} $(TEST_FLAGS)" >> $${log};\
fi; \
else \
- echo "Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
+ echo "Testing: $${tname} $(TEST_FLAGS)"; \
if test -n "$(REALTIMEOUTPUT)"; then \
- echo "$(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" | tee -a $${log}; \
+ echo "Test log for $${tname} $(TEST_FLAGS)" | tee -a $${log}; \
else \
- echo "$(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
+ echo "Test log for $${tname} $(TEST_FLAGS)" >> $${log}; \
fi; \
fi; \
if test -n "$(REALTIMEOUTPUT)"; then \
+ if test -n "$(HDF5_DRIVER)"; then \
+ echo "Virtual file driver (VFD): $(HDF5_DRIVER)" | tee -a $${log}; \
+ fi; \
+ else \
+ if test -n "$(HDF5_DRIVER)"; then \
+ echo "Virtual file driver (VFD): $(HDF5_DRIVER)" >> $${log}; \
+ fi; \
+ fi; \
+ if test -n "$(REALTIMEOUTPUT)"; then \
echo "============================" | tee -a $${log}; \
else \
echo "============================" >> $${log}; \
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 9ca077c..26323d7 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -14,7 +14,7 @@ const char *FILENAME[2]={ "bigio_test.h5",
/* Define some handy debugging shorthands, routines, ... */
/* debugging tools */
-#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
+#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
/* Constants definitions */
#define RANK 2
@@ -40,16 +40,15 @@ typedef hsize_t B_DATATYPE;
int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-size_t bigcount = DXFER_BIGCOUNT;
+size_t bigcount = (size_t)DXFER_BIGCOUNT;
int nerrors = 0;
-int mpi_size, mpi_rank;
+static int mpi_size_g, mpi_rank_g;
hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
hsize_t space_dim2 = SPACE_DIM2;
static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
int api_option, int file_selection, int mem_selection, int mode);
-hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
/*
* Setup the coordinates for point selection.
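Editor's note: the hunks above switch t_bigio.c from the shared mpi_rank/mpi_size globals and the old MAINPROCESS shorthand to file-scoped mpi_size_g/mpi_rank_g plus a MAIN_PROCESS macro. A minimal, self-contained sketch of that pattern (an illustration only, not the actual test source) looks like this:

/* Sketch of the "static rank/size globals + main-process macro" pattern.
 * Build with an MPI C compiler, e.g. mpicc. */
#include <mpi.h>
#include <stdio.h>

static int mpi_size_g, mpi_rank_g;      /* file-scoped, set once in main() */

#define MAIN_PROCESS (mpi_rank_g == 0)  /* process 0 acts as the main process */

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);

    if (MAIN_PROCESS)
        printf("running with %d MPI processes\n", mpi_size_g);

    MPI_Finalize();
    return 0;
}
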
@@ -246,7 +245,7 @@ ccslab_set(int mpi_rank,
stride[1] = 1;
count[0] = space_dim1;
count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
+ start[0] = (hsize_t)mpi_rank*count[0];
start[1] = 0;
break;
@@ -255,11 +254,11 @@ ccslab_set(int mpi_rank,
/* Each process takes several disjoint blocks. */
block[0] = 1;
block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = space_dim1/(stride[0]*block[0]);
- count[1] = (space_dim2)/(stride[1]*block[1]);
- start[0] = space_dim1*mpi_rank;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1/(stride[0]*block[0]);
+ count[1] = (space_dim2)/(stride[1]*block[1]);
+ start[0] = space_dim1*(hsize_t)mpi_rank;
start[1] = 0;
break;
@@ -273,7 +272,7 @@ ccslab_set(int mpi_rank,
stride[1] = 1;
count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
+ start[0] = (hsize_t)mpi_rank*count[0];
start[1] = 0;
break;
@@ -284,14 +283,14 @@ ccslab_set(int mpi_rank,
half of the domain. */
block[0] = 1;
- count[0] = 2;
- stride[0] = space_dim1*mpi_size/4+1;
+ count[0] = 2;
+ stride[0] = (hsize_t)(space_dim1*(hsize_t)mpi_size/4+1);
block[1] = space_dim2;
count[1] = 1;
start[1] = 0;
stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ if((mpi_rank *3)<(mpi_size*2)) start[0] = (hsize_t)mpi_rank;
+ else start[0] = 1 + space_dim1*(hsize_t)mpi_size/2 + (hsize_t)(mpi_rank-2*mpi_size/3);
break;
case BYROW_SELECTINCHUNK:
@@ -299,18 +298,18 @@ ccslab_set(int mpi_rank,
block[0] = 1;
count[0] = 1;
- start[0] = mpi_rank*space_dim1;
+ start[0] = (hsize_t)mpi_rank*space_dim1;
stride[0]= 1;
- block[1] = space_dim2;
- count[1] = 1;
- stride[1]= 1;
- start[1] = 0;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1]= 1;
+ start[1] = 0;
break;
default:
/* Unknown mode. Set it to cover the whole dataset. */
- block[0] = space_dim1*mpi_size;
+ block[0] = space_dim1*(hsize_t)mpi_size;
block[1] = space_dim2;
stride[0] = block[0];
stride[1] = block[1];
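Editor's note: most of the ccslab_set() changes above add explicit (hsize_t) casts. hsize_t is an unsigned 64-bit type while the MPI rank and size are plain ints, so mixed arithmetic converts the int implicitly and compilers flag it under options such as -Wsign-conversion. A hedged sketch of the before/after (values are made up for illustration):

/* Illustration of why the (hsize_t) casts are added; not project code. */
#include <stdio.h>
#include <hdf5.h>

int main(void)
{
    int     mpi_rank = 3;                /* rank is an int in the MPI API */
    hsize_t count0   = 1024;
    hsize_t start0;

    /* start0 = mpi_rank * count0;          implicit int-to-hsize_t conversion warns */
    start0 = (hsize_t)mpi_rank * count0; /* explicit cast states the intent, no warning */

    printf("start0 = %llu\n", (unsigned long long)start0);
    return 0;
}
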
@@ -478,75 +477,72 @@ static void
dataset_big_write(void)
{
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset;
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK],stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
hsize_t *coords = NULL;
- int i;
- herr_t ret; /* Generic return value */
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hsize_t h;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
size_t num_points;
B_DATATYPE * wdata;
/* allocate memory for data buffer */
wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
/* setup file access template */
acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* create the file collectively */
fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
+ VRFY_G((fid >= 0), "H5Fcreate succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
/* Each process takes a slabs of rows. */
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset1 write by ROW\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size_g;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank_g*block[0];
start[1] = 0;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
@@ -558,17 +554,17 @@ dataset_big_write(void)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -576,40 +572,40 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
/* Each process takes a slabs of cols. */
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset2 write by COL\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
block[0] = dims[0];
- block[1] = dims[1]/mpi_size;
+ block[1] = dims[1]/(hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank_g*block[1];
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
@@ -621,17 +617,17 @@ dataset_big_write(void)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -639,51 +635,51 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
/* ALL selection */
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
/* Create a large dataset */
dims[0] = bigcount;
dims[1] = 1;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(mpi_rank == 0) {
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sset_all succeeded");
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
else {
ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
- if(mpi_rank != 0) {
+ VRFY_G((mem_dataspace >= 0), "");
+ if(mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* fill the local slab with some trivial data */
@@ -695,7 +691,7 @@ dataset_big_write(void)
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -703,19 +699,19 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
/* Point selection */
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nTesting Dataset4 write point selection\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size * 4;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
block[0] = dims[0]/2;
@@ -725,19 +721,19 @@ dataset_big_write(void)
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = dims[1]/mpi_size * mpi_rank;
+ start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
num_points = bigcount;
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ VRFY_G((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
if(coords) free(coords);
@@ -754,21 +750,21 @@ dataset_big_write(void)
* appears to cause problems with 32 bit compilers.
*/
mem_dataspace = H5Screate_simple (1, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -776,7 +772,7 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
HDfree(wdata);
H5Fclose(fid);
@@ -806,60 +802,58 @@ dataset_big_read(void)
hsize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
hsize_t block[RANK]; /* for hyperslab setting */
- int i,j,k;
- hsize_t h;
size_t num_points;
hsize_t *coords = NULL;
herr_t ret; /* Generic return value */
/* allocate memory for data buffer */
rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
- VRFY((rdata != NULL), "rdata malloc succeeded");
+ VRFY_G((rdata != NULL), "rdata malloc succeeded");
wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
/* setup file access template */
acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* open the file collectively */
fid=H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_tpl);
- VRFY((fid >= 0), "H5Fopen succeeded");
+ VRFY_G((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset1 by COL\n");
dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
/* Each process takes a slabs of cols. */
block[0] = dims[0];
- block[1] = dims[1]/mpi_size;
+ block[1] = dims[1]/(hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank_g*block[1];
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
@@ -870,18 +864,18 @@ dataset_big_read(void)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
@@ -892,36 +886,36 @@ dataset_big_read(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset2 by ROW\n");
HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
/* Each process takes a slabs of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size_g;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank_g*block[0];
start[1] = 0;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
@@ -932,18 +926,18 @@ dataset_big_read(void)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+ VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
@@ -954,35 +948,35 @@ dataset_big_read(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
dims[1] = 1;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(mpi_rank == 0) {
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sset_all succeeded");
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
else {
ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
- if(mpi_rank != 0) {
+ VRFY_G((mem_dataspace >= 0), "");
+ if(mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* fill dataset with test data */
@@ -994,20 +988,20 @@ dataset_big_read(void)
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset3 succeeded");
+ VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
- if(mpi_rank == 0) {
+ if(mpi_rank_g == 0) {
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
@@ -1018,15 +1012,15 @@ dataset_big_read(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("\nRead Testing Dataset4 with Point selection\n");
dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size * 4;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
block[0] = dims[0]/2;
block[1] = 2;
@@ -1035,7 +1029,7 @@ dataset_big_read(void)
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = dims[1]/mpi_size * mpi_rank;
+ start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
fill_datasets(start, block, wdata);
MESG("data_array initialized");
@@ -1047,14 +1041,14 @@ dataset_big_read(void)
num_points = bigcount;
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ VRFY_G((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
if(coords) HDfree(coords);
@@ -1064,22 +1058,22 @@ dataset_big_read(void)
* appears to cause problems with 32 bit compilers.
*/
mem_dataspace = H5Screate_simple (1, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
ret = verify_data(start, count, stride, block, rdata, wdata);
if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
@@ -1089,7 +1083,7 @@ dataset_big_read(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
HDfree(wdata);
HDfree(rdata);
@@ -1110,7 +1104,7 @@ dataset_big_read(void)
if (xfer_plist != -1) H5Pclose(xfer_plist);
if (dataset != -1) {
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
}
H5Fclose(fid);
@@ -1135,7 +1129,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
return (ret_pl);
@@ -1143,11 +1137,11 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
if (l_facc_type == FACC_MPIO){
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
return(ret_pl);
}
@@ -1155,17 +1149,17 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
hid_t mpio_pl;
mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
+ VRFY_G((mpio_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
/* setup file access template */
ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
+ VRFY_G((ret_pl >= 0), "");
/* set Parallel access with communicator */
ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
H5Pclose(mpio_pl);
return(ret_pl);
}
@@ -1214,7 +1208,7 @@ void
coll_chunk1(void)
{
const char *filename = FILENAME[0];
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("coll_chunk1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
@@ -1268,7 +1262,7 @@ void
coll_chunk2(void)
{
const char *filename = FILENAME[0];
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("coll_chunk2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
@@ -1323,18 +1317,18 @@ void
coll_chunk3(void)
{
const char *filename = FILENAME[0];
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("coll_chunk3\n");
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
@@ -1395,34 +1389,33 @@ coll_chunktest(const char* filename,
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
- int i;
/* Create the data space */
acc_plist = create_faccess_plist(comm,info,facc_type);
- VRFY((acc_plist >= 0),"");
+ VRFY_G((acc_plist >= 0),"");
file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
+ VRFY_G((file >= 0),"H5Fcreate succeeded");
status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
/* setup dimensionality object */
- dims[0] = space_dim1*mpi_size;
+ dims[0] = space_dim1*(hsize_t)mpi_size_g;
dims[1] = space_dim2;
/* allocate memory for data buffer */
data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
/* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
/* set up the coords array selection */
num_points = block[0] * block[1] * count[0] * count[1];
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ VRFY_G((coords != NULL), "coords malloc succeeded");
point_set(start, count, stride, block, num_points, coords, mode);
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1430,36 +1423,36 @@ coll_chunktest(const char* filename,
* appears to cause problems with 32 bit compilers.
*/
file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+ VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
if(ALL != mem_selection) {
mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
}
else {
/* Putting the warning about H5Screate_simple (above) into practice... */
hsize_t dsdims[1] = {num_points};
mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
}
crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((crp_plist >= 0),"");
+ VRFY_G((crp_plist >= 0),"");
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/chunk_factor;
+ chunk_dims[0] = dims[0]/(hsize_t)chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
(chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2/2);
status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY((status >= 0),"chunk creation property list succeeded");
+ VRFY_G((status >= 0),"chunk creation property list succeeded");
dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
- VRFY((dataset >= 0),"dataset created succeeded");
+ VRFY_G((dataset >= 0),"dataset created succeeded");
status = H5Pclose(crp_plist);
- VRFY((status >= 0), "");
+ VRFY_G((status >= 0), "");
/*put some trivial data in the data array */
ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
@@ -1469,93 +1462,93 @@ coll_chunktest(const char* filename,
switch (file_selection) {
case HYPER:
status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
switch (mem_selection) {
case HYPER:
status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
/* set up the collective transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ VRFY_G((status>= 0),"set independent IO collectively succeeded");
}
switch(api_option){
case API_LINK_HARD:
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization succeeded");
break;
case API_MULTI_HARD:
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded ");
+ VRFY_G((status>= 0),"collective chunk optimization succeeded ");
break;
case API_LINK_TRUE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
case API_LINK_FALSE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
case API_MULTI_COLL:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
case API_MULTI_IND:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
default:
@@ -1569,42 +1562,42 @@ coll_chunktest(const char* filename,
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_HARD:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_TRUE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_FALSE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_COLL:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_IND:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
default:
@@ -1616,45 +1609,45 @@ coll_chunktest(const char* filename,
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
xfer_plist, data_array1);
- VRFY((status >= 0),"dataset write succeeded");
+ VRFY_G((status >= 0),"dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if(facc_type == FACC_MPIO) {
switch(api_option){
case API_LINK_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
break;
case API_MULTI_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
break;
case API_LINK_TRUE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
break;
case API_LINK_FALSE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
break;
case API_MULTI_COLL:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
break;
case API_MULTI_IND:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
break;
default:
@@ -1664,20 +1657,20 @@ coll_chunktest(const char* filename,
#endif
status = H5Dclose(dataset);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
+ VRFY_G((status >= 0),"property list closed");
status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Fclose(file);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
if (data_array1) HDfree(data_array1);
@@ -1685,35 +1678,35 @@ coll_chunktest(const char* filename,
/* allocate memory for data buffer */
data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
/* allocate memory for data buffer */
data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+ VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
+ VRFY_G((acc_plist >= 0),"MPIO creation property list succeeded");
file = H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
+ VRFY_G((file >= 0),"H5Fcreate succeeded");
status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
/* open the collective dataset*/
dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
+ VRFY_G((dataset >= 0), "");
/* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
/* obtain the file and mem dataspace*/
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "");
+ VRFY_G((file_dataspace >= 0), "");
if (ALL != mem_selection) {
mem_dataspace = H5Dget_space (dataset);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
}
else {
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1722,92 +1715,92 @@ coll_chunktest(const char* filename,
*/
hsize_t dsdims[1] = {num_points};
mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
}
switch (file_selection) {
case HYPER:
status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
switch (mem_selection) {
case HYPER:
status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
/* fill dataset with test data */
ccdataset_fill(start, stride,count,block, data_origin1, mem_selection);
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
+ VRFY_G((xfer_plist >= 0),"");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ VRFY_G((status>= 0),"set independent IO collectively succeeded");
}
status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
xfer_plist, data_array1);
- VRFY((status >=0),"dataset read succeeded");
+ VRFY_G((status >=0),"dataset read succeeded");
/* verify the read data with original expected data */
status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
if (status) nerrors++;
status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
+ VRFY_G((status >= 0),"property list closed");
/* close dataset collectively */
status=H5Dclose(dataset);
- VRFY((status >= 0), "H5Dclose");
+ VRFY_G((status >= 0), "H5Dclose");
/* release all IDs created */
status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"H5Sclose");
+ VRFY_G((status >= 0),"H5Sclose");
status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"H5Sclose");
+ VRFY_G((status >= 0),"H5Sclose");
/* close the file collectively */
status = H5Fclose(file);
- VRFY((status >= 0),"H5Fclose");
+ VRFY_G((status >= 0),"H5Fclose");
/* release data buffers */
if(coords) HDfree(coords);
@@ -1873,7 +1866,7 @@ int main(int argc, char **argv)
int ExpressMode = 0;
hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
- hsize_t oldsize = H5S_mpio_set_bigio_count(newsize);
+ hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
/* Having set the bigio handling to a size that is managable,
* we'll set our 'bigcount' variable to be 2X that limit so
@@ -1885,8 +1878,8 @@ int main(int argc, char **argv)
}
MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size_g);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank_g);
/* Attempt to turn off atexit post processing so that in case errors
* happen during the test and the process is aborted, it will not get
@@ -1900,7 +1893,7 @@ int main(int argc, char **argv)
/* set alarm. */
ALARM_ON;
- ExpressMode = do_express_test(mpi_rank);
+ ExpressMode = do_express_test(mpi_rank_g);
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
@@ -1909,7 +1902,7 @@ int main(int argc, char **argv)
MPI_Barrier(MPI_COMM_WORLD);
if (ExpressMode > 0) {
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDprintf("***Express test mode on. Several tests are skipped\n");
}
else {
@@ -1923,7 +1916,7 @@ int main(int argc, char **argv)
/* turn off alarm */
ALARM_OFF;
- if (mpi_rank == 0)
+ if (mpi_rank_g == 0)
HDremove(FILENAME[0]);
/* close HDF5 library */
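
The t_bigio.c hunks above make two related changes: the internal big-I/O threshold setter is now spelled H5_mpi_set_bigio_count(), and the per-function mpi_rank/mpi_size locals are replaced by file-scope mpi_rank_g/mpi_size_g so the new VRFY_G macro (added to testpar.h at the end of this diff) can report the failing rank. A minimal sketch of that startup pattern, assuming the internal header H5private.h declares H5_mpi_set_bigio_count(); this is illustrative only, not the full test:

#include <mpi.h>
#include "hdf5.h"
#include "H5private.h"   /* assumed to declare the internal H5_mpi_set_bigio_count() */

/* file-scope rank/size so helper routines and VRFY_G can report the
 * failing rank without each function carrying its own mpi_rank */
static int     mpi_rank_g = 0;
static int     mpi_size_g = 0;
static hsize_t bigcount   = 0;

int main(int argc, char **argv)
{
    hsize_t newsize = 1048576;
    hsize_t oldsize;

    /* lower the big-I/O threshold so the large-count code paths can be
     * exercised with a manageable amount of data, then size the test
     * buffers at twice that limit */
    oldsize  = H5_mpi_set_bigio_count(newsize);
    bigcount = newsize * 2;
    (void)oldsize;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);

    /* ... dataset_big_write() / dataset_big_read() would run here,
     * checking results with VRFY_G((cond), "msg") ... */

    MPI_Finalize();
    return 0;
}
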
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index ff89ee4..4f0f554 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -24,6 +24,7 @@
#include "H5ACpkg.h"
#include "H5Cpkg.h"
+#include "H5CXprivate.h"
#include "H5Fpkg.h"
#include "H5Iprivate.h"
#include "H5MFprivate.h"
@@ -1619,7 +1620,7 @@ serve_read_request(struct mssg_t * mssg_ptr)
reply.base_addr = data[target_index].base_addr;
reply.len = data[target_index].len;
reply.ver = data[target_index].ver;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
/* and update the counters */
@@ -1760,7 +1761,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
hbool_t report_mssg = FALSE;
hbool_t success = TRUE;
int target_index;
- int new_ver_num;
+ int new_ver_num = 0;
haddr_t target_addr;
#if DO_WRITE_REQ_ACK
struct mssg_t reply;
@@ -1925,7 +1926,7 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = total_writes;
+ reply.count = (unsigned)total_writes;
reply.magic = MSSG_MAGIC;
}
@@ -2004,7 +2005,7 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = total_reads;
+ reply.count = (unsigned)total_reads;
reply.magic = MSSG_MAGIC;
}
@@ -2098,7 +2099,7 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = data[target_index].writes;
+ reply.count = (unsigned)data[target_index].writes;
reply.magic = MSSG_MAGIC;
}
}
@@ -2195,7 +2196,7 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = (long)(data[target_index].reads);
+ reply.count = (unsigned)(data[target_index].reads);
reply.magic = MSSG_MAGIC;
}
}
@@ -4611,7 +4612,7 @@ verify_entry_reads(haddr_t addr,
int expected_entry_reads)
{
hbool_t success = TRUE;
- int reported_entry_reads;
+ int reported_entry_reads = 0;
struct mssg_t mssg;
if ( success ) {
@@ -4718,7 +4719,7 @@ verify_entry_writes(haddr_t addr,
int expected_entry_writes)
{
hbool_t success = TRUE;
- int reported_entry_writes;
+ int reported_entry_writes = 0;
struct mssg_t mssg;
if ( success ) {
@@ -7288,7 +7289,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
/* flush the file */
@@ -7318,7 +7319,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
/* protect the other half independently */
@@ -7339,7 +7340,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
for ( i = 0; i < (virt_num_data_entries); i++ )
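
The t_cache.c hunks above are mostly warning cleanup: counters are initialized before use, tallies are cast to the unsigned reply.count field explicitly, and the 80% threshold assertions promote max_cache_size to double by hand rather than implicitly. A small self-contained sketch of those two cast patterns; the struct and values here are invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct cache_like {
    size_t max_cache_size;
    size_t coll_list_size;
};

int main(void)
{
    struct cache_like c            = {1024, 512};
    int               total_writes = 7;
    unsigned          count;

    /* compare in double explicitly, as the patched HDassert does,
     * instead of relying on an implicit size_t -> double promotion */
    if ((double)c.max_cache_size * 0.8 > (double)c.coll_list_size)
        printf("coll entries stay below the 80%% threshold\n");

    /* make the int -> unsigned conversion visible at the call site,
     * mirroring the (unsigned)total_writes cast above */
    count = (unsigned)total_writes;
    printf("count = %u\n", count);

    return 0;
}
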
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 40cc1ca..c092a9c 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -16,7 +16,7 @@
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* some commonly used routines for collective chunk IO tests*/
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index 912388c..0485bab 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -49,7 +49,7 @@
* in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
* this caused the non-participating ranks to issue a collective MPI_Bcast() call
* which the other ranks did not issue, thus causing a hang.
- *
+ *
* However, since these ranks are not actually reading/writing anything, this call
* can simply be removed and the address used for the read/write can be set to an
* arbitrary number (0 was chosen).
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 6c91a41..832a47f 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -3806,7 +3806,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
uint32_t no_collective_cause_global_expected = 0;
const char * filename;
- const char * test_name;
+ const char * test_name = "I/O";
hbool_t is_chunked=1;
int mpi_size = -1;
int mpi_rank = -1;
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 62db11a..81bb7c2 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -21,11 +21,11 @@
*
* Process zero:
*
- * 1) Creates a core file with an integer vector data set of
- * length n (= mpi_size),
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
*
- * 2) Initializes the vector to zero in * location 0, and to -1
- * everywhere else.
+ * 2) Initializes the vector to zero in * location 0, and to -1
+ * everywhere else.
*
* 3) Flushes the core file, and gets an image of it. Closes
* the core file.
@@ -35,7 +35,7 @@
* 5) Awaits receipt on a file image from process n-1.
*
* 6) opens the image received from process n-1, verifies that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1)
*
* 7) closes the core file and exits.
@@ -45,7 +45,7 @@
* 1) Await receipt of file image from process (i - 1).
*
* 2) Open the image with the core file driver, verify that i
- * contains a vector v of length, and that v[j] = j for
+ * contains a vector v of length, and that v[j] = j for
* 0 <= j < i, and that v[j] == -1 for i <= j < n
*
* 3) Set v[i] = i in the core file.
@@ -87,13 +87,13 @@ file_image_daisy_chain_test(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
(int)mpi_rank);
if(mpi_rank == 0) {
-
- /* 1) Creates a core file with an integer vector data set
- * of length mpi_size,
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
@@ -111,10 +111,10 @@ file_image_daisy_chain_test(void)
dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "created data set");
-
- /* 2) Initialize the vector to zero in location 0, and
- * to -1 everywhere else.
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
*/
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
@@ -131,7 +131,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 3) Flush the core file, and get an image of it. Close
* the core file.
*/
@@ -159,14 +159,14 @@ file_image_daisy_chain_test(void)
err = H5Pclose(fapl_id);
VRFY((err >= 0), "closed fapl(1).");
-
+
/* 4) Send the image to process 1. */
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
@@ -190,9 +190,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process n-1");
-
+
/* 6) open the image received from process n-1, verify that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1).
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -229,7 +229,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -238,7 +238,7 @@ file_image_daisy_chain_test(void)
if(vector_ptr[i] != i)
vector_ok = FALSE;
VRFY((vector_ok), "verified received vector.");
-
+
HDfree(vector_ptr);
vector_ptr = NULL;
@@ -276,9 +276,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process mpi_rank-1");
-
+
/* 2) Open the image with the core file driver, verify that it
- * contains a vector v of length, and that v[j] = j for
+ * contains a vector v of length, and that v[j] = j for
* 0 <= j < i, and that v[j] == -1 for i <= j < n
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -316,7 +316,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -331,7 +331,7 @@ file_image_daisy_chain_test(void)
}
}
VRFY((vector_ok), "verified received vector.");
-
+
/* 3) Set v[i] = i in the core file. */
@@ -344,7 +344,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
@@ -359,14 +359,14 @@ file_image_daisy_chain_test(void)
bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
VRFY(bytes_read == image_len, "wrote file into image buffer");
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image size to process (mpi_rank + 1) % mpi_size");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image to process (mpi_rank + 1) % mpi_size");
@@ -374,7 +374,7 @@ file_image_daisy_chain_test(void)
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
-
+
/* 5) close the core file and exit. */
err = H5Sclose(space_id);
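
The t_file_image.c changes above are whitespace cleanup, but the surrounding comments describe the daisy-chain protocol: flush the core file, grab its image with H5Fget_file_image(), then MPI_Ssend() first the length and then the bytes to the next rank. A hedged sketch of that step as a helper; send_file_image() is an invented name and the error handling is reduced for brevity:

#include <stdlib.h>
#include <mpi.h>
#include "hdf5.h"

/* Assumes file_id is a core-VFD file already populated by this rank and
 * `next` is the rank that should receive the image.
 * Returns 0 on success, -1 on failure. */
static int
send_file_image(hid_t file_id, int next)
{
    ssize_t image_len;
    void   *image_ptr = NULL;
    int     ret       = -1;

    if (H5Fflush(file_id, H5F_SCOPE_GLOBAL) < 0)
        goto done;

    /* first call sizes the image, second call fills the buffer */
    if ((image_len = H5Fget_file_image(file_id, NULL, (size_t)0)) <= 0)
        goto done;
    if (NULL == (image_ptr = malloc((size_t)image_len)))
        goto done;
    if (H5Fget_file_image(file_id, image_ptr, (size_t)image_len) != image_len)
        goto done;

    /* ship the length first so the receiver can size its buffer */
    if (MPI_Ssend((void *)&image_len, (int)sizeof(ssize_t), MPI_BYTE,
                  next, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
        goto done;
    if (MPI_Ssend(image_ptr, (int)image_len, MPI_BYTE,
                  next, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
        goto done;

    ret = 0;

done:
    free(image_ptr);
    return ret;
}
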
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 933fbd2..0e40fe4 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -37,7 +37,7 @@ main (int argc, char **argv)
/* Initialize and finalize MPI */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
@@ -65,7 +65,7 @@ main (int argc, char **argv)
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
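
The PASSED() call above now takes a trailing semicolon, which matches the usual statement-like macro idiom; the exact definition lives in the shared test headers and is not shown in this diff. A minimal sketch of that idiom with an invented REPORT_PASSED() macro:

#include <stdio.h>

#define REPORT_PASSED()                    \
    do {                                   \
        puts(" PASSED");                   \
        fflush(stdout);                    \
    } while (0)

int main(void)
{
    int nerrors = 0;

    /* because the macro expands to a single statement, it composes
     * cleanly with if/else and requires the trailing semicolon */
    if (nerrors == 0)
        REPORT_PASSED();
    else
        puts("*** FAILED ***");

    return 0;
}
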
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 63ac8d3..ed8867d 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -233,6 +233,9 @@ void compact_dataset(void)
char dname[]="dataset";
herr_t ret;
const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
size = get_size();
@@ -312,7 +315,6 @@ void compact_dataset(void)
VRFY((dataset >= 0), "H5Dopen2 succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
@@ -618,6 +620,9 @@ void dataset_fillvalue(void)
int acc, i, ii, j, k, l; /* Local index variables */
herr_t ret; /* Generic return value */
const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -662,7 +667,6 @@ void dataset_fillvalue(void)
VRFY((dxpl >= 0), "H5Pcreate succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
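
In compact_dataset() and dataset_fillvalue() above, the hbool_t prop_value declaration moves from the middle of the function body to the top of the function, still inside the H5_HAVE_INSTRUMENTED_LIBRARY guard, so declarations stay ahead of statements. A small sketch of that pattern; H5_HAVE_EXTRA_CHECKS is an invented stand-in for the real guard:

#include <stdio.h>

static void run_test(void)
{
#ifdef H5_HAVE_EXTRA_CHECKS
    int prop_value;                 /* declared up front, C90-friendly */
#endif

    puts("doing the common part of the test");

#ifdef H5_HAVE_EXTRA_CHECKS
    prop_value = 1;                 /* only the use stays down here */
    printf("extra check enabled: %d\n", prop_value);
#endif
}

int main(void)
{
    run_test();
    return 0;
}
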
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 890a918..8735cbb 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -303,7 +303,7 @@ static int test_mpio_gb_file(char *filename) {
mpi_rank, mpi_off, mpi_off);
/* set data to some trivial pattern for easy verification */
for (j = 0; j < MB; j++)
- *(buf + j) = i * mpi_size + mpi_rank;
+ *(buf + j) = (int8_t)(i * mpi_size + mpi_rank);
if (VERBOSE_MED)
HDfprintf(stdout,
"proc %d: writing %d bytes at offset %lld\n",
@@ -351,7 +351,7 @@ static int test_mpio_gb_file(char *filename) {
mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE,
&mpi_stat);
INFO((mrc == MPI_SUCCESS), "GB size file read");
- expected = i * mpi_size + (mpi_size - mpi_rank - 1);
+ expected = (int8_t)(i * mpi_size + (mpi_size - mpi_rank - 1));
vrfyerrs = 0;
for (j = 0; j < MB; j++) {
if ((*(buf + j) != expected)
@@ -597,7 +597,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
return 1;
};
for (i = 0; i < DIMSIZE; i++) {
- expect_val = irank * DIMSIZE + i;
+ expect_val = (uint8_t)(irank * DIMSIZE + i);
if (readdata[i] != expect_val) {
PRINTID;
HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
@@ -697,7 +697,7 @@ static int test_mpio_derived_dtype(char *filename) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
retcode = 0;
for (i = 0; i < 3; i++)
- buf[i] = i + 1;
+ buf[i] = (char)(i + 1);
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index f58e5a5..f4589c8 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -189,7 +189,7 @@ main(int argc, char *argv[])
goto error;
}
else if(mpi_rank == 0) {
- PASSED()
+ PASSED();
}
/* Check the case where the file was not flushed. This should give an error
@@ -203,7 +203,7 @@ main(int argc, char *argv[])
h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
if(check_test_file(name, fapl_id2)) {
if(mpi_rank == 0)
- PASSED()
+ PASSED();
}
else {
H5_FAILED()
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 74feeb6..ba4165e 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -281,7 +281,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
/* create a chunked dataset */
chunk[0] = COUNT/8;
-
+
if ( pass ) {
if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
pass = false;
@@ -295,9 +295,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
failure_mssg = "H5Pset_chunk() failed.\n";
}
}
-
+
if ( pass ) {
-
+
if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
filespace, H5P_DEFAULT, dcpl_id,
H5P_DEFAULT)) < 0 ) {
@@ -319,7 +319,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
failure_mssg = "H5Pclose(dcpl_id) failed.\n";
}
}
-
+
if ( pass || (dset_id_ch != -1)) {
if ( H5Dclose(dset_id_ch) < 0 ) {
pass = false;
@@ -698,8 +698,8 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
data_slice = NULL;
}
- /*
- * Test reading proc0-read-and-bcast with sub-communicators
+ /*
+ * Test reading proc0-read-and-bcast with sub-communicators
*/
/* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
@@ -798,7 +798,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
nextValue = 0;
else /* test 2 group 1 */
nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
+
i = 0;
while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
/* what we really want is data_slice[i] != nextValue --
@@ -863,7 +863,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
nextValue = 0;
else /* test 2 group 1 */
nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
+
i = 0;
while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
/* what we really want is data_slice[i] != nextValue --
@@ -893,8 +893,8 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
data_slice = NULL;
}
- /*
- * Read an H5S_ALL filespace into a hyperslab defined memory space
+ /*
+ * Read an H5S_ALL filespace into a hyperslab defined memory space
*/
if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
@@ -979,14 +979,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
i++;
}
}
-
+
if ( pass || (memspace != -1) ) {
if ( H5Sclose(memspace) < 0 ) {
pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
-
+
/* free data_slice if it has been allocated */
if ( data_slice != NULL ) {
HDfree(data_slice);
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index d75e627..069c1d4 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -47,11 +47,11 @@ main (int argc, char **argv)
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -120,14 +120,14 @@ main (int argc, char **argv)
HDremove(filename);
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
nerrors += GetTestNumErrs();
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 3f72db9..4fdcf47 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -97,7 +97,7 @@ test_plist_ed(void)
int mpi_size, mpi_rank, recv_proc;
- hsize_t chunk_size = 16384; /* chunk size */
+ hsize_t chunk_size = 16384; /* chunk size */
double fill = 2.7f; /* Fill value */
size_t nslots = 521*2;
size_t nbytes = 1048576 * 10;
@@ -165,16 +165,16 @@ test_plist_ed(void)
VRFY((ret>=0), "set fill-value succeeded");
max_size[0] = 100;
- ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index def7071..8d90a5a 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -51,11 +51,11 @@ main (int argc, char **argv)
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -107,7 +107,7 @@ main (int argc, char **argv)
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
MPI_Finalize();
@@ -116,7 +116,7 @@ main (int argc, char **argv)
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 0eb4838..b0c4a4e 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -783,12 +783,12 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
* selections of different rank in the parallel.
*
* Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* In this function, we test this by reading small_rank - 1
* slices from the on disk large cube, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same()
+ * data read is correct. Verify that H5Sselect_shape_same()
* returns true on the memory and file selections.
*
* Return: void
@@ -933,12 +933,11 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
"H5Sselect_hyperslab(file_large_cube_sid) succeeded");
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
@@ -1020,7 +1019,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* selections of different rank in the parallel.
*
* Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* In this function, we test this by reading slices of the
@@ -1197,12 +1196,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
"H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
@@ -1299,14 +1297,14 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* selections of different rank in the parallel.
*
* Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* Do this by writing small_rank - 1 dimensional slices from
* the in memory large data set to the on disk small cube
* dataset. After each write, read the slice of the small
* dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same()
+ * the expected data. Verify that H5Sselect_shape_same()
* returns true on the memory and file selections.
*
* Return: void
@@ -1341,12 +1339,12 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 dimensional slices from the in memory large
* data set to the on disk small cube dataset. After each write, read the
* slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -1507,13 +1505,12 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
"H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory slice through the cube selection and the
* on disk full square selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
/* write the slice from the in memory large data set to the
@@ -1623,7 +1620,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* selections of different rank in the parallel.
*
* Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* Do this by writing the contents of the process's slice of
@@ -1632,7 +1629,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* slice of the large data set back into memory, and verify
* that it contains the expected data.
*
- * Verify that H5S_select_shape_same() returns true on the
+ * Verify that H5Sselect_shape_same() returns true on the
* memory and file selections.
*
* Return: void
@@ -1668,7 +1665,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
* into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
@@ -1835,14 +1832,13 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
"H5Sselect_hyperslab() target large ds slice succeeded");
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
* on disk slice through the large data set selection
* as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
@@ -2084,12 +2080,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
* Start by reading small_rank - 1 dimensional slice from the on disk
* large cube, and verifying that the data read is correct. Verify that
- * H5S_select_shape_same() returns true on the memory and file selections.
+ * H5Sselect_shape_same() returns true on the memory and file selections.
*/
#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
@@ -2115,12 +2111,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
* set to the on disk small cube dataset. After each write, read the
* slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -2136,7 +2132,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
* into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
@@ -2932,12 +2928,12 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
*
* Verify that we can read from disk correctly using checker
* board selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* In this function, we test this by reading small_rank - 1
* checker board slices from the on disk large cube, and
* verifying that the data read is correct. Verify that
- * H5S_select_shape_same() returns true on the memory and
+ * H5Sselect_shape_same() returns true on the memory and
* file selections.
*
* Return: void
@@ -2969,12 +2965,12 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
* Start by reading a (small_rank - 1)-D checker board slice from this
* processes slice of the on disk large data set, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same() returns
+ * data read is correct. Verify that H5Sselect_shape_same() returns
* true on the memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -3118,12 +3114,11 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start
);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
@@ -3203,7 +3198,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* selections of different rank in the parallel.
*
* Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* In this function, we test this by reading checker board
@@ -3380,12 +3375,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
@@ -3529,14 +3523,14 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*
* Verify that we can write from memory to file using checker
* board selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Do this by writing small_rank - 1 dimensional checker
* board slices from the in memory large data set to the on
* disk small cube dataset. After each write, read the
* slice of the small dataset back from disk, and verify
* that it contains the expected data. Verify that
- * H5S_select_shape_same() returns true on the memory and
+ * H5Sselect_shape_same() returns true on the memory and
* file selections.
*
* Return: void
@@ -3573,12 +3567,12 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
* set to the on disk small dataset. After each write, read the slice of
* the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -3759,14 +3753,13 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
);
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory checkerboard selection of the slice through the
* large dataset and the checkerboard selection of the process
* slice of the small data set as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_1,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
/* write the checker board selection of the slice from the in
@@ -3886,7 +3879,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* board hyperslab selections of different rank in the parallel.
*
* Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
+ * selections of different rank that H5Sselect_shape_same()
* views as being of the same shape.
*
* Do this by writing checker board selections of the contents
@@ -3895,7 +3888,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* read the process's slice of the large data set back into
* memory, and verify that it contains the expected data.
*
- * Verify that H5S_select_shape_same() returns true on the
+ * Verify that H5Sselect_shape_same() returns true on the
* memory and file selections.
*
* Return: void
@@ -3934,7 +3927,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
* into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
@@ -4110,14 +4103,13 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
);
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
* on disk slice through the large data set selection
* as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_1);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
@@ -4369,12 +4361,12 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
* Start by reading a (small_rank - 1)-D slice from this processes slice
* of the on disk large data set, and verifying that the data read is
- * correct. Verify that H5S_select_shape_same() returns true on the
+ * correct. Verify that H5Sselect_shape_same() returns true on the
* memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -4394,12 +4386,12 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
* set to the on disk small dataset. After each write, read the slice of
* the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -4410,7 +4402,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
* small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
* into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
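
The t_shapesame.c (and, below, t_span_tree.c) hunks swap the internal H5S_select_shape_same_test() helper for the public H5Sselect_shape_same() API, which returns an htri_t. A minimal standalone sketch of the public call; the 1-D and 1x10 dataspaces are chosen only to show two selections of different rank that should compare as the same shape:

#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    hsize_t dims1[1] = {10};
    hsize_t dims2[2] = {1, 10};
    hid_t   sid1     = H5Screate_simple(1, dims1, NULL);
    hid_t   sid2     = H5Screate_simple(2, dims2, NULL);
    htri_t  same;

    /* both dataspaces default to an "all" selection */
    same = H5Sselect_shape_same(sid1, sid2);
    printf("same shape: %s\n", (same > 0) ? "yes" : "no");

    H5Sclose(sid1);
    H5Sclose(sid2);
    return 0;
}
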
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 0466839..eb3b0f7 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -2289,11 +2289,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_sel_start);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(mem_large_ds_sid, file_small_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed (1)");
+ check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
ret = H5Dread(small_dataset,
@@ -2409,11 +2409,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_sel_start);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(mem_small_ds_sid, file_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed (2)");
+ check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
ret = H5Dread(large_dataset,
diff --git a/testpar/testpar.h b/testpar/testpar.h
index 86677d1..f76de51 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -34,7 +34,7 @@
if (VERBOSE_MED && *mesg != '\0') \
HDprintf("%s\n", mesg)
-/*
+/*
* VRFY: Verify if the condition val is true.
* If it is true, then call MESG to print mesg, depending on the verbose
* level.
@@ -44,12 +44,12 @@
* This will allow program to continue and can be used for debugging.
* (The "do {...} while(0)" is to group all the statements as one unit.)
*/
-#define VRFY(val, mesg) do { \
+#define VRFY_IMPL(val, mesg, rankvar) do { \
if (val) { \
MESG(mesg); \
} \
else { \
- HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("Proc %d: ", rankvar); \
HDprintf("*** Parallel ERROR ***\n"); \
HDprintf(" VRFY (%s) failed at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
@@ -62,6 +62,9 @@
} \
} while(0)
+#define VRFY_G(val, mesg) VRFY_IMPL(val, mesg, mpi_rank_g)
+#define VRFY(val, mesg) VRFY_IMPL(val, mesg, mpi_rank)
+
/*
* Checking for information purpose.
* If val is false, print mesg; else nothing.
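
With VRFY_IMPL() factored out above, VRFY keeps reading a local mpi_rank while VRFY_G reads the file-scope mpi_rank_g. A hedged usage sketch, assuming the usual per-test globals (such as nerrors) and headers that the parallel test files provide:

#include <mpi.h>
#include "hdf5.h"
#include "testpar.h"

int nerrors    = 0;   /* assumed per-test error tally used by the macros */
int mpi_rank_g = 0;
int mpi_size_g = 0;

void check_globally(hid_t fapl)
{
    /* no local mpi_rank needed; VRFY_G expands to VRFY_IMPL(..., mpi_rank_g) */
    VRFY_G((fapl >= 0), "fapl is a valid property list");
}

void check_locally(hid_t fapl)
{
    int mpi_rank = 0;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    VRFY((fapl >= 0), "fapl is a valid property list");
}
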