summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    jhendersonHDF <jhenderson@hdfgroup.org>  2023-07-27 19:11:46 (GMT)
committer GitHub <noreply@github.com>              2023-07-27 19:11:46 (GMT)
commit    2bb4c909c4c63cf87d10c494cd76d57092d45335 (patch)
tree      e5da9e282045d3b17921169e00bb4164aa31bedc
parent    17a5a1a37ccc851ca9123b2921fbae15c4814edf (diff)
download  hdf5-2bb4c909c4c63cf87d10c494cd76d57092d45335.zip
          hdf5-2bb4c909c4c63cf87d10c494cd76d57092d45335.tar.gz
          hdf5-2bb4c909c4c63cf87d10c494cd76d57092d45335.tar.bz2
Fix some warnings in developer builds (#3247) (#3291)
* Fix some warnings in developer builds
* Switch approach to Winline flag
-rw-r--r--config/cmake/HDFCompilerFlags.cmake13
-rw-r--r--config/gnu-warnings/developer-general9
-rw-r--r--config/intel-warnings/developer-general9
-rw-r--r--src/H5.c310
-rw-r--r--src/H5Dmpio.c23
-rw-r--r--src/H5FDmpio.c2
-rw-r--r--src/H5FDonion.c3
-rw-r--r--src/H5MMprivate.h6
-rw-r--r--src/H5Spoint.c26
-rw-r--r--src/H5Tref.c6
-rw-r--r--src/H5mpi.c5
-rw-r--r--test/chunk_info.c4
-rw-r--r--test/h5test.c3
-rw-r--r--test/onion.c3
-rw-r--r--test/stab.c5
-rw-r--r--testpar/API/t_coll_md_read.c30
-rw-r--r--testpar/t_bigio.c12
-rw-r--r--testpar/t_coll_chunk.c12
-rw-r--r--testpar/t_coll_md.c30
-rw-r--r--testpar/t_filters_parallel.c3
-rw-r--r--tools/lib/h5tools.h1
-rw-r--r--tools/lib/h5tools_str.c4
22 files changed, 250 insertions, 269 deletions
diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake
index 00850d3..1878b9b 100644
--- a/config/cmake/HDFCompilerFlags.cmake
+++ b/config/cmake/HDFCompilerFlags.cmake
@@ -189,6 +189,19 @@ if (HDF5_ENABLE_DEV_WARNINGS)
elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang")
ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general")
endif ()
+
+ # Turn on -Winline warnings now only for non-Debug and
+ # non-Developer builds. For at least GNU compilers this
+ # flag appears to conflict specifically with the -Og
+ # optimization flag and will produce warnings about functions
+ # not being considered for inlining
+ if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer")
+ if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ list (APPEND H5_CFLAGS "-Winline")
+ elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS)
+ list (APPEND H5_CFLAGS "-Winline")
+ endif ()
+ endif ()
else ()
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8)
ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general")
diff --git a/config/gnu-warnings/developer-general b/config/gnu-warnings/developer-general
index 460b874..79ecd6a 100644
--- a/config/gnu-warnings/developer-general
+++ b/config/gnu-warnings/developer-general
@@ -1,10 +1,17 @@
# (suggestions from gcc, not code problems)
-Waggregate-return
-Wdisabled-optimization
--Winline
-Wmissing-format-attribute
-Wmissing-noreturn
-Wswitch-default
-Wswitch-enum
-Wunsafe-loop-optimizations
-Wunused-macros
+# -Winline warnings aren't included here because, for at least
+# GNU compilers, this flag appears to conflict specifically with
+# the -Og optimization level flag added for Debug and Developer
+# builds and will produce warnings about functions not being
+# considered for inlining. The flag will be added to the list
+# of compiler flags separately if developer warnings are enabled
+# and the build type is not Debug or Developer
+#-Winline
diff --git a/config/intel-warnings/developer-general b/config/intel-warnings/developer-general
index fae56f0..861218e 100644
--- a/config/intel-warnings/developer-general
+++ b/config/intel-warnings/developer-general
@@ -1,4 +1,11 @@
--Winline
-Wreorder
-Wport
-Wstrict-aliasing
+# -Winline warnings aren't included here because, for at least
+# GNU compilers, this flag appears to conflict specifically with
+# the -Og optimization level flag added for Debug and Developer
+# builds and will produce warnings about functions not being
+# considered for inlining. The flag will be added to the list
+# of compiler flags separately if developer warnings are enabled
+# and the build type is not Debug or Developer
+#-Winline
diff --git a/src/H5.c b/src/H5.c
index 1e625c3..c22d0c9 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -243,33 +243,35 @@ H5_init_library(void)
* The dataspace interface needs to be initialized so that future IDs for
* dataspaces work.
*/
- /* clang-format off */
- struct {
- herr_t (*func)(void);
- const char *descr;
- } initializer[] = {
- {H5E_init, "error"}
- , {H5VL_init_phase1, "VOL"}
- , {H5SL_init, "skip lists"}
- , {H5FD_init, "VFD"}
- , {H5_default_vfd_init, "default VFD"}
- , {H5P_init_phase1, "property list"}
- , {H5AC_init, "metadata caching"}
- , {H5L_init, "link"}
- , {H5S_init, "dataspace"}
- , {H5PL_init, "plugins"}
- /* Finish initializing interfaces that depend on the interfaces above */
- , {H5P_init_phase2, "property list"}
- , {H5VL_init_phase2, "VOL"}
- };
-
- for (i = 0; i < NELMTS(initializer); i++) {
- if (initializer[i].func() < 0) {
- HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL,
- "unable to initialize %s interface", initializer[i].descr)
+ {
+ /* clang-format off */
+ struct {
+ herr_t (*func)(void);
+ const char *descr;
+ } initializer[] = {
+ {H5E_init, "error"}
+ , {H5VL_init_phase1, "VOL"}
+ , {H5SL_init, "skip lists"}
+ , {H5FD_init, "VFD"}
+ , {H5_default_vfd_init, "default VFD"}
+ , {H5P_init_phase1, "property list"}
+ , {H5AC_init, "metadata caching"}
+ , {H5L_init, "link"}
+ , {H5S_init, "dataspace"}
+ , {H5PL_init, "plugins"}
+ /* Finish initializing interfaces that depend on the interfaces above */
+ , {H5P_init_phase2, "property list"}
+ , {H5VL_init_phase2, "VOL"}
+ };
+
+ for (i = 0; i < NELMTS(initializer); i++) {
+ if (initializer[i].func() < 0) {
+ HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL,
+ "unable to initialize %s interface", initializer[i].descr)
+ }
}
+ /* clang-format on */
}
- /* clang-format on */
/* Debugging? */
H5__debug_mask("-all");
@@ -349,139 +351,141 @@ H5_term_library(void)
* way that would necessitate some cleanup work in the other interface.
*/
-#define TERMINATOR(module, wait) { \
- .func = H5##module##_term_package \
- , .name = #module \
- , .completed = false \
- , .await_prior = wait \
- }
+ {
+#define TERMINATOR(module, wait) { \
+ .func = H5##module##_term_package \
+ , .name = #module \
+ , .completed = false \
+ , .await_prior = wait \
+ }
- /*
- * Termination is ordered by the `terminator` table so the "higher" level
- * packages are shut down before "lower" level packages that they
- * rely on:
- */
- struct {
- int (*func)(void); /* function to terminate the module; returns 0
- * on success, >0 if termination was not
- * completed and we should try to terminate
- * some dependent modules, first.
- */
- const char *name; /* name of the module */
- hbool_t completed; /* true iff this terminator was already
- * completed
- */
- const hbool_t await_prior; /* true iff all prior terminators in the
- * list must complete before this
- * terminator is attempted
- */
- } terminator[] = {
- /* Close the event sets first, so that all asynchronous operations
- * complete before anything else attempts to shut down.
- */
- TERMINATOR(ES, false)
- /* Do not attempt to close down package L until after event sets
- * have finished closing down.
- */
- , TERMINATOR(L, true)
- /* Close the "top" of various interfaces (IDs, etc) but don't shut
- * down the whole interface yet, so that the object header messages
- * get serialized correctly for entries in the metadata cache and the
- * symbol table entry in the superblock gets serialized correctly, etc.
- * all of which is performed in the 'F' shutdown.
- *
- * The tops of packages A, D, G, M, S, T do not need to wait for L
- * or previous packages to finish closing down.
- */
- , TERMINATOR(A_top, false)
- , TERMINATOR(D_top, false)
- , TERMINATOR(G_top, false)
- , TERMINATOR(M_top, false)
- , TERMINATOR(S_top, false)
- , TERMINATOR(T_top, false)
- /* Don't shut down the file code until objects in files are shut down */
- , TERMINATOR(F, true)
- /* Don't shut down the property list code until all objects that might
- * use property lists are shut down
- */
- , TERMINATOR(P, true)
- /* Wait to shut down the "bottom" of various interfaces until the
- * files are closed, so pieces of the file can be serialized
- * correctly.
- *
- * Shut down the "bottom" of the attribute, dataset, group,
- * reference, dataspace, and datatype interfaces, fully closing
- * out the interfaces now.
- */
- , TERMINATOR(A, true)
- , TERMINATOR(D, false)
- , TERMINATOR(G, false)
- , TERMINATOR(M, false)
- , TERMINATOR(S, false)
- , TERMINATOR(T, false)
- /* Wait to shut down low-level packages like AC until after
- * the preceding high-level packages have shut down. This prevents
- * low-level objects from closing "out from underneath" their
- * reliant high-level objects.
- */
- , TERMINATOR(AC, true)
- /* Shut down the "pluggable" interfaces, before the plugin framework */
- , TERMINATOR(Z, false)
- , TERMINATOR(FD, false)
- , TERMINATOR(VL, false)
- /* Don't shut down the plugin code until all "pluggable" interfaces
- * (Z, FD, PL) are shut down
- */
- , TERMINATOR(PL, true)
- /* Shut down the following packages in strictly the order given
- * by the table.
+ /*
+ * Termination is ordered by the `terminator` table so the "higher" level
+ * packages are shut down before "lower" level packages that they
+ * rely on:
*/
- , TERMINATOR(E, true)
- , TERMINATOR(I, true)
- , TERMINATOR(SL, true)
- , TERMINATOR(FL, true)
- , TERMINATOR(CX, true)
- };
-
- do {
- pending = 0;
- for (i = 0; i < NELMTS(terminator); i++) {
- if (terminator[i].completed)
- continue;
- if (pending != 0 && terminator[i].await_prior)
- break;
- if (terminator[i].func() == 0) {
- terminator[i].completed = true;
- continue;
+ struct {
+ int (*func)(void); /* function to terminate the module; returns 0
+ * on success, >0 if termination was not
+ * completed and we should try to terminate
+ * some dependent modules, first.
+ */
+ const char *name; /* name of the module */
+ hbool_t completed; /* true iff this terminator was already
+ * completed
+ */
+ const hbool_t await_prior; /* true iff all prior terminators in the
+ * list must complete before this
+ * terminator is attempted
+ */
+ } terminator[] = {
+ /* Close the event sets first, so that all asynchronous operations
+ * complete before anything else attempts to shut down.
+ */
+ TERMINATOR(ES, false)
+ /* Do not attempt to close down package L until after event sets
+ * have finished closing down.
+ */
+ , TERMINATOR(L, true)
+ /* Close the "top" of various interfaces (IDs, etc) but don't shut
+ * down the whole interface yet, so that the object header messages
+ * get serialized correctly for entries in the metadata cache and the
+ * symbol table entry in the superblock gets serialized correctly, etc.
+ * all of which is performed in the 'F' shutdown.
+ *
+ * The tops of packages A, D, G, M, S, T do not need to wait for L
+ * or previous packages to finish closing down.
+ */
+ , TERMINATOR(A_top, false)
+ , TERMINATOR(D_top, false)
+ , TERMINATOR(G_top, false)
+ , TERMINATOR(M_top, false)
+ , TERMINATOR(S_top, false)
+ , TERMINATOR(T_top, false)
+ /* Don't shut down the file code until objects in files are shut down */
+ , TERMINATOR(F, true)
+ /* Don't shut down the property list code until all objects that might
+ * use property lists are shut down
+ */
+ , TERMINATOR(P, true)
+ /* Wait to shut down the "bottom" of various interfaces until the
+ * files are closed, so pieces of the file can be serialized
+ * correctly.
+ *
+ * Shut down the "bottom" of the attribute, dataset, group,
+ * reference, dataspace, and datatype interfaces, fully closing
+ * out the interfaces now.
+ */
+ , TERMINATOR(A, true)
+ , TERMINATOR(D, false)
+ , TERMINATOR(G, false)
+ , TERMINATOR(M, false)
+ , TERMINATOR(S, false)
+ , TERMINATOR(T, false)
+ /* Wait to shut down low-level packages like AC until after
+ * the preceding high-level packages have shut down. This prevents
+ * low-level objects from closing "out from underneath" their
+ * reliant high-level objects.
+ */
+ , TERMINATOR(AC, true)
+ /* Shut down the "pluggable" interfaces, before the plugin framework */
+ , TERMINATOR(Z, false)
+ , TERMINATOR(FD, false)
+ , TERMINATOR(VL, false)
+ /* Don't shut down the plugin code until all "pluggable" interfaces
+ * (Z, FD, PL) are shut down
+ */
+ , TERMINATOR(PL, true)
+ /* Shut down the following packages in strictly the order given
+ * by the table.
+ */
+ , TERMINATOR(E, true)
+ , TERMINATOR(I, true)
+ , TERMINATOR(SL, true)
+ , TERMINATOR(FL, true)
+ , TERMINATOR(CX, true)
+ };
+
+ do {
+ pending = 0;
+ for (i = 0; i < NELMTS(terminator); i++) {
+ if (terminator[i].completed)
+ continue;
+ if (pending != 0 && terminator[i].await_prior)
+ break;
+ if (terminator[i].func() == 0) {
+ terminator[i].completed = true;
+ continue;
+ }
+
+ /* log a package when its terminator needs to be retried */
+ pending++;
+ nprinted = HDsnprintf(next, nleft, "%s%s",
+ (next != loop) ? "," : "", terminator[i].name);
+ if (nprinted < 0)
+ continue;
+ if ((size_t)nprinted >= nleft)
+ nprinted = HDsnprintf(next, nleft, "...");
+ if (nprinted < 0 || (size_t)nprinted >= nleft)
+ continue;
+ nleft -= (size_t)nprinted;
+ next += nprinted;
}
+ } while (pending && ntries++ < 100);
- /* log a package when its terminator needs to be retried */
- pending++;
- nprinted = HDsnprintf(next, nleft, "%s%s",
- (next != loop) ? "," : "", terminator[i].name);
- if (nprinted < 0)
- continue;
- if ((size_t)nprinted >= nleft)
- nprinted = HDsnprintf(next, nleft, "...");
- if (nprinted < 0 || (size_t)nprinted >= nleft)
- continue;
- nleft -= (size_t)nprinted;
- next += nprinted;
- }
- } while (pending && ntries++ < 100);
-
- /* clang-format on */
+ /* clang-format on */
- if (pending) {
- /* Only display the error message if the user is interested in them. */
- if (func) {
- HDfprintf(stderr, "HDF5: infinite loop closing library\n");
- HDfprintf(stderr, " %s\n", loop);
+ if (pending) {
+ /* Only display the error message if the user is interested in them. */
+ if (func) {
+ fprintf(stderr, "HDF5: infinite loop closing library\n");
+ fprintf(stderr, " %s\n", loop);
#ifndef NDEBUG
- HDabort();
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
+ HDabort();
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ }
/* Free open debugging streams */
while (H5_debug_g.open_stream) {
@@ -1122,7 +1126,7 @@ H5close(void)
*
*-------------------------------------------------------------------------
*/
-void *
+void *H5_ATTR_MALLOC
H5allocate_memory(size_t size, hbool_t clear)
{
void *ret_value = NULL;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 77edfc4..29410f8 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -54,29 +54,6 @@
#define H5D_MULTI_CHUNK_IO 1
#define H5D_ONE_LINK_CHUNK_IO_MORE_OPT 2
#define H5D_MULTI_CHUNK_IO_MORE_OPT 3
-#define H5D_NO_IO 4
-
-/***** Macros for One linked collective IO case. *****/
-/* The default value to do one linked collective IO for all chunks.
- * If the average number of chunks per process is greater than this
- * value, the library will create an MPI derived datatype to link all
- * chunks to do collective IO. The user can set this value through an
- * API.
- */
-
-/* Macros to represent options on how to obtain chunk address for one linked-chunk IO case */
-#define H5D_OBTAIN_ONE_CHUNK_ADDR_IND 0
-#define H5D_OBTAIN_ALL_CHUNK_ADDR_COL 2
-
-/* Macros to define the default ratio of obtaining all chunk addresses for one linked-chunk IO case */
-#define H5D_ALL_CHUNK_ADDR_THRES_COL 30
-#define H5D_ALL_CHUNK_ADDR_THRES_COL_NUM 10000
-
-/***** Macros for multi-chunk collective IO case. *****/
-/* The default value of the threshold to do collective IO for this
- * chunk. If the average number of processes per chunk is greater
- * than the default value, collective IO is done for this chunk.
- */
/* Macros to represent different IO modes(NONE, Independent or collective)for multiple chunk IO case */
#define H5D_CHUNK_IO_MODE_COL 1
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index 4b55bd5..50d3cff 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -236,6 +236,8 @@ H5FD__mem_t_to_str(H5FD_mem_t mem_type)
return "H5FD_MEM_LHEAP";
case H5FD_MEM_OHDR:
return "H5FD_MEM_OHDR";
+ case H5FD_MEM_NTYPES:
+ return "H5FD_MEM_NTYPES";
default:
return "(Unknown)";
}
diff --git a/src/H5FDonion.c b/src/H5FDonion.c
index 1c81870..ad21e8f 100644
--- a/src/H5FDonion.c
+++ b/src/H5FDonion.c
@@ -911,6 +911,7 @@ H5FD__onion_open(const char *filename, unsigned flags, hid_t fapl_id, haddr_t ma
const H5FD_onion_fapl_info_t *fa = NULL;
H5FD_onion_fapl_info_t *new_fa = NULL;
const char *config_str = NULL;
+ double log2_page_size = 0.0;
hid_t backing_fapl_id = H5I_INVALID_HID;
char *name_onion = NULL;
char *recovery_file_nameery = NULL;
@@ -994,7 +995,7 @@ H5FD__onion_open(const char *filename, unsigned flags, hid_t fapl_id, haddr_t ma
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "page size is not a power of two")
/* Assign the page size */
- double log2_page_size = HDlog2((double)(fa->page_size));
+ log2_page_size = HDlog2((double)(fa->page_size));
file->curr_rev_record.archival_index.page_size_log2 = (uint32_t)log2_page_size;
/* Proceed with open. */
diff --git a/src/H5MMprivate.h b/src/H5MMprivate.h
index 130a83e..eab8c89 100644
--- a/src/H5MMprivate.h
+++ b/src/H5MMprivate.h
@@ -34,9 +34,9 @@
H5_DLL void *H5MM_malloc(size_t size) H5_ATTR_MALLOC;
H5_DLL void *H5MM_calloc(size_t size) H5_ATTR_MALLOC;
H5_DLL void *H5MM_realloc(void *mem, size_t size);
-H5_DLL char *H5MM_xstrdup(const char *s);
-H5_DLL char *H5MM_strdup(const char *s);
-H5_DLL char *H5MM_strndup(const char *s, size_t n);
+H5_DLL char *H5MM_xstrdup(const char *s) H5_ATTR_MALLOC;
+H5_DLL char *H5MM_strdup(const char *s) H5_ATTR_MALLOC;
+H5_DLL char *H5MM_strndup(const char *s, size_t n) H5_ATTR_MALLOC;
H5_DLL void *H5MM_xfree(void *mem);
H5_DLL void *H5MM_xfree_const(const void *mem);
H5_DLL void *H5MM_memcpy(void *dest, const void *src, size_t n);
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 0d5a3d1..5233946 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -1353,16 +1353,18 @@ done:
static herr_t
H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip)
{
- H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use,
- either *space or a newly allocated one */
- hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */
- uint32_t version; /* Version number */
- uint8_t enc_size = 0; /* Encoded size of selection info */
- hsize_t *coord = NULL, *tcoord; /* Pointer to array of elements */
- const uint8_t *pp; /* Local pointer for decoding */
- uint64_t num_elem = 0; /* Number of elements in selection */
- unsigned rank; /* Rank of points */
- unsigned i, j; /* local counting variables */
+ H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use,
+ either *space or a newly allocated one */
+ hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */
+ uint32_t version; /* Version number */
+ uint8_t enc_size = 0; /* Encoded size of selection info */
+ hsize_t *coord = NULL, *tcoord; /* Pointer to array of elements */
+ const uint8_t *pp; /* Local pointer for decoding */
+ uint64_t num_elem = 0; /* Number of elements in selection */
+ unsigned rank; /* Rank of points */
+ unsigned i, j; /* local counting variables */
+ size_t enc_type_size;
+ size_t coordinate_buffer_requirement;
herr_t ret_value = SUCCEED; /* Return value */
const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */
FUNC_ENTER_PACKAGE
@@ -1460,7 +1462,7 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hb
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate coordinate information")
/* Determine necessary size of buffer for coordinates */
- size_t enc_type_size = 0;
+ enc_type_size = 0;
switch (enc_size) {
case H5S_SELECT_INFO_ENC_SIZE_2:
@@ -1477,7 +1479,7 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hb
break;
}
- size_t coordinate_buffer_requirement = num_elem * rank * enc_type_size;
+ coordinate_buffer_requirement = num_elem * rank * enc_type_size;
if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, coordinate_buffer_requirement, p_end))
HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection coordinates")
diff --git a/src/H5Tref.c b/src/H5Tref.c
index f914b8d..f96d181 100644
--- a/src/H5Tref.c
+++ b/src/H5Tref.c
@@ -1003,8 +1003,10 @@ H5T__ref_disk_write(H5VL_object_t H5_ATTR_UNUSED *src_file, const void *src_buf,
src_size -= H5R_ENCODE_HEADER_SIZE;
#ifndef NDEBUG
- size_t buf_size_left = dst_size - sizeof(uint32_t);
- HDassert(buf_size_left > sizeof(uint32_t));
+ {
+ size_t buf_size_left = dst_size - sizeof(uint32_t);
+ assert(buf_size_left > sizeof(uint32_t));
+ }
#endif
/* Set the size */
diff --git a/src/H5mpi.c b/src/H5mpi.c
index 31ad036..f847508 100644
--- a/src/H5mpi.c
+++ b/src/H5mpi.c
@@ -70,7 +70,7 @@ H5_mpi_set_bigio_count(hsize_t new_count)
*
*-------------------------------------------------------------------------
*/
-hsize_t
+H5_ATTR_PURE hsize_t
H5_mpi_get_bigio_count(void)
{
return bigio_count_g;
@@ -803,6 +803,7 @@ H5_mpio_get_file_sync_required(MPI_File fh, hbool_t *file_sync_required)
{
MPI_Info info_used;
int flag;
+ char *sync_env_var;
char value[MPI_MAX_INFO_VAL];
herr_t ret_value = SUCCEED;
@@ -826,7 +827,7 @@ H5_mpio_get_file_sync_required(MPI_File fh, hbool_t *file_sync_required)
HGOTO_ERROR(H5E_LIB, H5E_CANTFREE, FAIL, "can't free MPI info")
/* Force setting the flag via env variable (temp solution before the flag is implemented in MPI) */
- char *sync_env_var = HDgetenv("HDF5_DO_MPI_FILE_SYNC");
+ sync_env_var = HDgetenv("HDF5_DO_MPI_FILE_SYNC");
if (sync_env_var && (!HDstrcmp(sync_env_var, "TRUE") || !HDstrcmp(sync_env_var, "1")))
*file_sync_required = TRUE;
if (sync_env_var && (!HDstrcmp(sync_env_var, "FALSE") || !HDstrcmp(sync_env_var, "0")))
diff --git a/test/chunk_info.c b/test/chunk_info.c
index 5651b26..77d2b8a 100644
--- a/test/chunk_info.c
+++ b/test/chunk_info.c
@@ -114,10 +114,6 @@ const char *FILENAME[] = {"tchunk_info_earliest", "tchunk_info_v18", "tchunk_in
/* For compressed data */
#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s)) * 1.001) + 12.0)
-/* For use in error reporting */
-#define MSG_CHK_ADDR "Chunk address should not be HADDR_UNDEF because of H5D_ALLOC_TIME_EARLY."
-#define MSG_CHK_SIZE "Chunk size should not be 0 because of H5D_ALLOC_TIME_EARLY."
-
/* Utility function to initialize arguments */
void reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size);
diff --git a/test/h5test.c b/test/h5test.c
index 856de4b..e3a3fb0 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -88,9 +88,6 @@ MPI_Info h5_io_info_g = MPI_INFO_NULL; /* MPI INFO object for IO */
*/
static const char *multi_letters = "msbrglo";
-/* Length of multi-file VFD filename buffers */
-#define H5TEST_MULTI_FILENAME_LEN 1024
-
/* Temporary file for sending signal messages */
#define TMP_SIGNAL_FILE "tmp_signal_file"
diff --git a/test/onion.c b/test/onion.c
index c643c00..1063b67 100644
--- a/test/onion.c
+++ b/test/onion.c
@@ -36,7 +36,6 @@
#define ONION_TEST_PAGE_SIZE_1 4
#define ONION_TEST_PAGE_SIZE_5 32
-#define ONION_TEST_BASENAME_SIZE 32
#define ONION_TEST_FIXNAME_SIZE 1024
#define ONION_TEST_EXPECTED_HISTORY_REVISIONS_MAX 16
#define ONION_TEST_REV_REV_WRITES_MAX 8
@@ -1308,6 +1307,7 @@ test_revision_record_encode_decode(void)
uint64_t size_ret;
H5FD_onion_revision_record_t r_out;
uint32_t checksum = 0;
+ hbool_t badness = FALSE;
char comment[25] = "Example comment message.";
H5FD_onion_revision_record_t record = {
H5FD_ONION_REVISION_RECORD_VERSION_CURR,
@@ -1383,7 +1383,6 @@ test_revision_record_encode_decode(void)
if (H5FD__onion_revision_record_encode(&record, buf, &checksum) != exp_size)
TEST_ERROR;
- hbool_t badness = FALSE;
for (i = 0; i < exp_size; i++) {
if (exp[i] != buf[i]) {
badness = TRUE;
diff --git a/test/stab.c b/test/stab.c
index 697bce0..8daf9cc 100644
--- a/test/stab.c
+++ b/test/stab.c
@@ -198,6 +198,7 @@ test_long(hid_t fcpl, hid_t fapl, hbool_t new_format)
{
hid_t fid = (-1); /* File ID */
hid_t g1 = (-1), g2 = (-1);
+ size_t name2Len;
char *name1 = NULL, *name2 = NULL;
char filename[NAME_BUF_SIZE];
size_t i;
@@ -217,8 +218,8 @@ test_long(hid_t fcpl, hid_t fapl, hbool_t new_format)
for (i = 0; i < LONG_NAME_LEN; i++)
name1[i] = (char)('A' + i % 26);
name1[LONG_NAME_LEN - 1] = '\0';
- size_t name2Len = (2 * LONG_NAME_LEN) + 2;
- name2 = (char *)HDmalloc(name2Len);
+ name2Len = (2 * LONG_NAME_LEN) + 2;
+ name2 = (char *)malloc(name2Len);
HDsnprintf(name2, name2Len, "%s/%s", name1, name1);
/* Create groups */
diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c
index f6f99bf..1c37ba7 100644
--- a/testpar/API/t_coll_md_read.c
+++ b/testpar/API/t_coll_md_read.c
@@ -369,36 +369,6 @@ test_multi_chunk_io_addrmap_issue(void)
* I/O with collective metadata reads enabled doesn't cause issues due to
* collective metadata reads being made only by process 0 in H5D__sort_chunk().
*
- * NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, the following two conditions must be
- * true to trigger the issue:
- *
- * Condition 1: A certain threshold ratio must be met in order to have HDF5
- * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
- * given by the following:
- *
- * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
- *
- * where:
- * * `sum_chunk` is the combined sum of the number of chunks selected in
- * the dataset by all ranks (chunks selected by more than one rank count
- * individually toward the sum for each rank selecting that chunk)
- * * `dataset_nchunks` is the number of chunks in the dataset (selected
- * or not)
- * * `mpi_size` is the size of the MPI Communicator
- *
- * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
- * threshold (as of this writing, 10000).
- *
- * To satisfy both these conditions, we #define a macro,
- * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
- * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
- * 10000 threshold from condition 2). We then create a dataset of that many
- * chunks and have each MPI rank write to and read from a piece of every single
- * chunk in the dataset. This ensures chunk utilization is the max possible
- * and exceeds our 30% target ratio, while always exactly matching the numeric
- * chunk threshold value of condition 2.
- *
* Failure in this test may either cause a hang, or, due to how the MPI calls
* pertaining to this issue might mistakenly match up, may cause an MPI error
* message similar to:
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index bca52b1..0f8d01f 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1517,6 +1517,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(file_dataspace);
VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
switch (mem_selection) {
@@ -1540,6 +1543,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(mem_dataspace);
VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
/* set up the collective transfer property list */
@@ -1777,6 +1783,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(file_dataspace);
VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
switch (mem_selection) {
@@ -1800,6 +1809,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(mem_dataspace);
VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
/* fill dataset with test data */
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 6636ffa..da8d69e 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -669,6 +669,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(file_dataspace);
VRFY((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
switch (mem_selection) {
@@ -692,6 +695,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(mem_dataspace);
VRFY((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
/* set up the collective transfer property list */
@@ -932,6 +938,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(file_dataspace);
VRFY((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
switch (mem_selection) {
@@ -955,6 +964,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
status = H5Sselect_all(mem_dataspace);
VRFY((status >= 0), "H5Sselect_all succeeded");
break;
+
+ default:
+ break;
}
/* fill dataset with test data */
diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c
index aa72486..cc32d01 100644
--- a/testpar/t_coll_md.c
+++ b/testpar/t_coll_md.c
@@ -342,36 +342,6 @@ test_multi_chunk_io_addrmap_issue(void)
* I/O with collective metadata reads enabled doesn't cause issues due to
* collective metadata reads being made only by process 0 in H5D__sort_chunk().
*
- * NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, the following two conditions must be
- * true to trigger the issue:
- *
- * Condition 1: A certain threshold ratio must be met in order to have HDF5
- * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
- * given by the following:
- *
- * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
- *
- * where:
- * * `sum_chunk` is the combined sum of the number of chunks selected in
- * the dataset by all ranks (chunks selected by more than one rank count
- * individually toward the sum for each rank selecting that chunk)
- * * `dataset_nchunks` is the number of chunks in the dataset (selected
- * or not)
- * * `mpi_size` is the size of the MPI Communicator
- *
- * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
- * threshold (as of this writing, 10000).
- *
- * To satisfy both these conditions, we #define a macro,
- * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
- * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
- * 10000 threshold from condition 2). We then create a dataset of that many
- * chunks and have each MPI rank write to and read from a piece of every single
- * chunk in the dataset. This ensures chunk utilization is the max possible
- * and exceeds our 30% target ratio, while always exactly matching the numeric
- * chunk threshold value of condition 2.
- *
* Failure in this test may either cause a hang, or, due to how the MPI calls
* pertaining to this issue might mistakenly match up, may cause an MPI error
* message similar to:
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index b4a4edb..1593d15 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -380,6 +380,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
else
VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
break;
+ case H5D_ALLOC_TIME_ERROR:
default:
if (MAINPROCESS)
MESG("unknown space allocation time");
@@ -8771,6 +8772,8 @@ main(int argc, char **argv)
case H5D_ALLOC_TIME_INCR:
alloc_time = "Incremental";
break;
+ case H5D_ALLOC_TIME_DEFAULT:
+ case H5D_ALLOC_TIME_ERROR:
default:
alloc_time = "Unknown";
}
diff --git a/tools/lib/h5tools.h b/tools/lib/h5tools.h
index 753a83b..3b0d5b9 100644
--- a/tools/lib/h5tools.h
+++ b/tools/lib/h5tools.h
@@ -652,6 +652,7 @@ H5TOOLS_DLLVAR int enable_error_stack; /* re-enable error stack; disable=0 enabl
#define H5_TOOLS_DATASET "DATASET"
#define H5_TOOLS_DATATYPE "DATATYPE"
#define H5_TOOLS_ATTRIBUTE "ATTRIBUTE"
+#define H5_TOOLS_MAP "MAP"
#define H5_TOOLS_UNKNOWN "UNKNOWN"
/* Definitions of useful routines */
diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c
index 223eb61..208e33a 100644
--- a/tools/lib/h5tools_str.c
+++ b/tools/lib/h5tools_str.c
@@ -1230,6 +1230,10 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, H5_TOOLS_DATATYPE);
break;
+ case H5O_TYPE_MAP:
+ h5tools_str_append(str, H5_TOOLS_MAP);
+ break;
+
case H5O_TYPE_UNKNOWN:
case H5O_TYPE_NTYPES:
default: