author     Neil Fortner <nfortne2@hdfgroup.org>    2015-04-10 23:00:48 (GMT)
committer  Neil Fortner <nfortne2@hdfgroup.org>    2015-04-10 23:00:48 (GMT)
commit     6a27582a4488d4d18a0b6cd584b8950ab02b81a7 (patch)
tree       01615f7679a3d3ef4b2d9caa487874aa421df506 /test
parent     7a4b02567e4de16d7478c10ffa07562b0daf320b (diff)
parent     3875cc6960266775e8078c782c8864516865be67 (diff)
[svn-r26781] Merge revisions 26305 through 26780 from trunk to vds branch.
Tested: ummon
Diffstat (limited to 'test')
-rw-r--r--  test/CMakeLists.txt    |  28
-rw-r--r--  test/CMakeTests.cmake  |   4
-rw-r--r--  test/Makefile.in       |  10
-rw-r--r--  test/cache.c           |   7
-rw-r--r--  test/cache_api.c       |   8
-rw-r--r--  test/cache_common.c    |   4
-rw-r--r--  test/cmpd_dset.c       |  16
-rw-r--r--  test/cross_read.c      |  98
-rw-r--r--  test/dsets.c           | 464
-rw-r--r--  test/dt_arith.c        | 307
-rw-r--r--  test/dtransform.c      |  25
-rw-r--r--  test/fheap.c           |   8
-rw-r--r--  test/fillval.c         |  24
-rw-r--r--  test/flush2.c          |  18
-rw-r--r--  test/gen_cross.c       |  84
-rw-r--r--  test/gen_filespace.c   |   3
-rw-r--r--  test/gen_filters.c     |   4
-rw-r--r--  test/gen_plist.c       |   4
-rw-r--r--  test/getname.c         |  59
-rw-r--r--  test/hyperslab.c       |   6
-rw-r--r--  test/links.c           |   2
-rw-r--r--  test/mtime.c           |   2
-rw-r--r--  test/objcopy.c         |  20
-rw-r--r--  test/plugin.c          |  94
-rw-r--r--  test/set_extent.c      |   4
-rw-r--r--  test/tarray.c          |  28
-rw-r--r--  test/tattr.c           |  22
-rw-r--r--  test/testhdf5.h        |  12
-rw-r--r--  test/tfile.c           |   4
-rw-r--r--  test/tgenprop.c        |  10
-rw-r--r--  test/th5s.c            |  38
-rw-r--r--  test/tmisc.c           | 104
-rw-r--r--  test/trefer.c          |  75
-rw-r--r--  test/tsohm.c           |   4
-rw-r--r--  test/ttsafe_cancel.c   |   7
-rw-r--r--  test/tunicode.c        |   4
-rw-r--r--  test/tvltypes.c        |  18
37 files changed, 598 insertions, 1031 deletions
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 5e23219..950b7cf 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -27,7 +27,7 @@ set (TEST_LIB_HEADERS
)
add_library (${HDF5_TEST_LIB_TARGET} ${LIB_TYPE} ${TEST_LIB_SRCS} ${TEST_LIB_HEADERS})
-TARGET_C_PROPERTIES (${HDF5_TEST_LIB_TARGET} " " " ")
+TARGET_C_PROPERTIES (${HDF5_TEST_LIB_TARGET} ${LIB_TYPE} " " " ")
if (MSVC)
target_link_libraries (${HDF5_TEST_LIB_TARGET} "ws2_32.lib")
endif (MSVC)
@@ -68,7 +68,7 @@ if (BUILD_SHARED_LIBS)
INCLUDE_DIRECTORIES (${HDF5_SRC_DIR})
add_library (${HDF5_TEST_PLUGIN_LIB_TARGET} ${LIB_TYPE} ${HDF5_TEST_SOURCE_DIR}/${test_lib}.c)
- TARGET_C_PROPERTIES (${HDF5_TEST_PLUGIN_LIB_TARGET} " " " ")
+ TARGET_C_PROPERTIES (${HDF5_TEST_PLUGIN_LIB_TARGET} ${LIB_TYPE} " " " ")
target_link_libraries (${HDF5_TEST_PLUGIN_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
H5_SET_LIB_OPTIONS (
${HDF5_TEST_PLUGIN_LIB_TARGET} ${HDF5_TEST_PLUGIN_LIB_NAME}
@@ -99,7 +99,7 @@ if (BUILD_SHARED_LIBS)
INCLUDE_DIRECTORIES (${HDF5_SRC_DIR})
add_library (${HDF5_TEST_PLUGIN_LIB_TARGET} ${LIB_TYPE} ${HDF5_TEST_SOURCE_DIR}/${test_lib}.c)
- TARGET_C_PROPERTIES (${HDF5_TEST_PLUGIN_LIB_TARGET} " " " ")
+ TARGET_C_PROPERTIES (${HDF5_TEST_PLUGIN_LIB_TARGET} ${LIB_TYPE} " " " ")
target_link_libraries (${HDF5_TEST_PLUGIN_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
H5_SET_LIB_OPTIONS (
${HDF5_TEST_PLUGIN_LIB_TARGET} ${HDF5_TEST_PLUGIN_LIB_NAME}
@@ -154,14 +154,14 @@ set (testhdf5_SRCS
#-- Adding test for testhdf5
add_executable (testhdf5 ${testhdf5_SRCS})
TARGET_NAMING (testhdf5 ${LIB_TYPE})
-TARGET_C_PROPERTIES (testhdf5 " " " ")
+TARGET_C_PROPERTIES (testhdf5 ${LIB_TYPE} " " " ")
target_link_libraries (testhdf5 ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (testhdf5 PROPERTIES FOLDER test)
MACRO (ADD_H5_EXE file)
add_executable (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c)
TARGET_NAMING (${file} ${LIB_TYPE})
- TARGET_C_PROPERTIES (${file} " " " ")
+ TARGET_C_PROPERTIES (${file} ${LIB_TYPE} " " " ")
target_link_libraries (${file} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (${file} PROPERTIES FOLDER test)
ENDMACRO (ADD_H5_EXE file)
@@ -238,21 +238,21 @@ endforeach (test ${H5_TESTS})
#-- Adding test for cache
add_executable (cache ${HDF5_TEST_SOURCE_DIR}/cache.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
TARGET_NAMING (cache ${LIB_TYPE})
-TARGET_C_PROPERTIES (cache " " " ")
+TARGET_C_PROPERTIES (cache ${LIB_TYPE} " " " ")
target_link_libraries (cache ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (cache PROPERTIES FOLDER test)
#-- Adding test for cache_api
add_executable (cache_api ${HDF5_TEST_SOURCE_DIR}/cache_api.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
TARGET_NAMING (cache_api ${LIB_TYPE})
-TARGET_C_PROPERTIES (cache_api " " " ")
+TARGET_C_PROPERTIES (cache_api ${LIB_TYPE} " " " ")
target_link_libraries (cache_api ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (cache_api PROPERTIES FOLDER test)
#-- Adding test for cache_tagging
add_executable (cache_tagging ${HDF5_TEST_SOURCE_DIR}/cache_tagging.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
TARGET_NAMING (cache_tagging ${LIB_TYPE})
-TARGET_C_PROPERTIES (cache_tagging " " " ")
+TARGET_C_PROPERTIES (cache_tagging ${LIB_TYPE} " " " ")
target_link_libraries (cache_tagging ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (cache_tagging PROPERTIES FOLDER test)
@@ -265,7 +265,7 @@ add_executable (ttsafe
${HDF5_TEST_SOURCE_DIR}/ttsafe_acreate.c
)
TARGET_NAMING (ttsafe ${LIB_TYPE})
-TARGET_C_PROPERTIES (ttsafe " " " ")
+TARGET_C_PROPERTIES (ttsafe ${LIB_TYPE} " " " ")
target_link_libraries (ttsafe ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (ttsafe PROPERTIES FOLDER test)
@@ -273,7 +273,7 @@ set_target_properties (ttsafe PROPERTIES FOLDER test)
if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
add_executable (err_compat ${HDF5_TEST_SOURCE_DIR}/err_compat.c)
TARGET_NAMING (err_compat ${LIB_TYPE})
- TARGET_C_PROPERTIES (err_compat " " " ")
+ TARGET_C_PROPERTIES (err_compat ${LIB_TYPE} " " " ")
target_link_libraries (err_compat ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (err_compat PROPERTIES FOLDER test)
endif (HDF5_ENABLE_DEPRECATED_SYMBOLS)
@@ -281,14 +281,14 @@ endif (HDF5_ENABLE_DEPRECATED_SYMBOLS)
#-- Adding test for error_test
add_executable (error_test ${HDF5_TEST_SOURCE_DIR}/error_test.c)
TARGET_NAMING (error_test ${LIB_TYPE})
-TARGET_C_PROPERTIES (error_test " " " ")
+TARGET_C_PROPERTIES (error_test ${LIB_TYPE} " " " ")
target_link_libraries (error_test ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (error_test PROPERTIES FOLDER test)
#-- Adding test for links_env
add_executable (links_env ${HDF5_TEST_SOURCE_DIR}/links_env.c)
TARGET_NAMING (links_env ${LIB_TYPE})
-TARGET_C_PROPERTIES (links_env " " " ")
+TARGET_C_PROPERTIES (links_env ${LIB_TYPE} " " " ")
target_link_libraries (links_env ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
set_target_properties (links_env PROPERTIES FOLDER test)
@@ -309,8 +309,8 @@ endif (\${TEST_RESULT} STREQUAL \"0\")
##############################################################################
if (BUILD_SHARED_LIBS)
add_executable (plugin ${HDF5_TEST_SOURCE_DIR}/plugin.c)
- TARGET_NAMING (plugin ${LIB_TYPE})
- TARGET_C_PROPERTIES (plugin " " " ")
+ TARGET_NAMING (plugin SHARED)
+ TARGET_C_PROPERTIES (plugin SHARED " " " ")
target_link_libraries (plugin ${HDF5_TEST_PLUGIN_LIB_TARGET})
set_target_properties (plugin PROPERTIES FOLDER test)
endif (BUILD_SHARED_LIBS)
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index c01ef61..c630501 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -728,8 +728,8 @@ endif (HDF5_TEST_VFD)
if (HDF5_BUILD_GENERATORS AND NOT BUILD_SHARED_LIBS)
MACRO (ADD_H5_GENERATOR genfile)
add_executable (${genfile} ${HDF5_TEST_SOURCE_DIR}/${genfile}.c)
- TARGET_NAMING (${genfile} ${LIB_TYPE})
- TARGET_C_PROPERTIES (${genfile} " " " ")
+ TARGET_NAMING (${genfile} STATIC)
+ TARGET_C_PROPERTIES (${genfile} STATIC " " " ")
target_link_libraries (${genfile} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (${genfile} PROPERTIES FOLDER generator/test)
ENDMACRO (ADD_H5_GENERATOR genfile)
diff --git a/test/Makefile.in b/test/Makefile.in
index 0ca6ff8..3204bd3 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -816,7 +816,6 @@ AM_FCFLAGS = @AM_FCFLAGS@ @H5_FCFLAGS@
AM_LDFLAGS = @AM_LDFLAGS@ @H5_LDFLAGS@
AM_MAKEFLAGS = @AM_MAKEFLAGS@
AR = @AR@
-AS = @AS@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -848,7 +847,6 @@ DIRECT_VFD = @DIRECT_VFD@
DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
-DYNAMIC_DIRS = @DYNAMIC_DIRS@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
@@ -867,17 +865,14 @@ FCFLAGS_f90 = @FCFLAGS_f90@
FCLIBS = @FCLIBS@
FC_VERSION = @FC_VERSION@
FGREP = @FGREP@
-FILTERS = @FILTERS@
FSEARCH_DIRS = @FSEARCH_DIRS@
GREP = @GREP@
H5_CFLAGS = @H5_CFLAGS@
H5_CPPFLAGS = @H5_CPPFLAGS@
H5_CXXFLAGS = @H5_CXXFLAGS@
-H5_CXX_SHARED = @H5_CXX_SHARED@
H5_FCFLAGS = @H5_FCFLAGS@
H5_FORTRAN_SHARED = @H5_FORTRAN_SHARED@
H5_LDFLAGS = @H5_LDFLAGS@
-H5_LONE_COLON = @H5_LONE_COLON@
H5_VERSION = @H5_VERSION@
HADDR_T = @HADDR_T@
HAVE_DMALLOC = @HAVE_DMALLOC@
@@ -940,7 +935,6 @@ R_INTEGER = @R_INTEGER@
R_LARGE = @R_LARGE@
SEARCH = @SEARCH@
SED = @SED@
-SETX = @SETX@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
SIZE_T = @SIZE_T@
@@ -955,10 +949,6 @@ TR = @TR@
TRACE_API = @TRACE_API@
UNAME_INFO = @UNAME_INFO@
USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@
-USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@
-USE_FILTER_NBIT = @USE_FILTER_NBIT@
-USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@
-USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@
USE_FILTER_SZIP = @USE_FILTER_SZIP@
USINGMEMCHECKER = @USINGMEMCHECKER@
VERSION = @VERSION@
diff --git a/test/cache.c b/test/cache.c
index 5ad1314..8725b31 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -20340,7 +20340,10 @@ check_auto_cache_resize(void)
auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold;
- auto_size_ctl.upper_hr_threshold = 0.999; /* for ease of testing */
+ /* NOTE: upper_hr_threshold MUST be type double (not float)
+ * or the cache test will fail on 64-bit systems.
+ */
+ auto_size_ctl.upper_hr_threshold = H5_DOUBLE(0.999); /* for ease of testing */
auto_size_ctl.decrement = 0.5f;
@@ -28353,7 +28356,7 @@ check_auto_cache_resize_aux_fcns(void)
pass = FALSE;
failure_mssg = "H5C_get_cache_hit_rate failed.\n";
- } else if ( ! DBL_REL_EQUAL(hit_rate, 0.5, FP_EPSILON) ) { /* i.e. hit_rate != 0.5 */
+ } else if ( ! DBL_REL_EQUAL(hit_rate, 0.5F, FP_EPSILON) ) { /* i.e. hit_rate != 0.5 */
pass = FALSE;
failure_mssg =
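
The cache.c hunk above replaces a bare 0.999 literal with H5_DOUBLE(0.999) so that upper_hr_threshold is unambiguously a double, as the new comment warns. As a rough illustration (DOUBLE_LIT is a hypothetical stand-in, not HDF5's actual H5_DOUBLE definition), a wrapper of this shape makes the intent explicit and shows why a single-precision constant would not compare equal to the double the test expects:

#include <assert.h>

/* DOUBLE_LIT is a hypothetical stand-in for HDF5's H5_DOUBLE macro:
 * it simply forces the constant to have type double. */
#define DOUBLE_LIT(x) ((double)(x))

int main(void)
{
    double threshold = DOUBLE_LIT(0.999);

    /* A float literal rounded to single precision does not equal the
     * double 0.999 -- the kind of mismatch the comment in the hunk
     * above warns about on 64-bit systems. */
    assert(threshold == 0.999);
    assert((double)0.999f != 0.999);
    return 0;
}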
diff --git a/test/cache_api.c b/test/cache_api.c
index b1ccef1..8f556be 100644
--- a/test/cache_api.c
+++ b/test/cache_api.c
@@ -2173,7 +2173,7 @@ H5AC_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] =
/* long int epoch_length = */ 50000,
/* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold,
/* double lower_hr_threshold = */ 0.9f,
- /* double increment = */ 0.999999999999,
+ /* double increment = */ H5_DOUBLE(0.999999999999),
/* hbool_t apply_max_increment = */ TRUE,
/* size_t max_increment = */ (4 * 1024 * 1024),
/* enum H5C_cache_flash_incr_mode */
@@ -2532,7 +2532,7 @@ H5AC_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] =
/* double flash_threshold = */ 0.5f,
/* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold,
/* double upper_hr_threshold = */ 0.999f,
- /* double decrement = */ 1.0000000001,
+ /* double decrement = */ H5_DOUBLE(1.0000000001),
/* hbool_t apply_max_decrement = */ TRUE,
/* size_t max_decrement = */ (1 * 1024 * 1024),
/* int epochs_before_eviction = */ 3,
@@ -2712,7 +2712,7 @@ H5AC_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] =
/* size_t max_decrement = */ (1 * 1024 * 1024),
/* int epochs_before_eviction = */ 3,
/* hbool_t apply_empty_reserve = */ TRUE,
- /* double empty_reserve = */ 1.00000000001,
+ /* double empty_reserve = */ H5_DOUBLE(1.00000000001),
/* int dirty_bytes_threshold = */ (256 * 1024),
/* int metadata_write_strategy = */
H5AC__DEFAULT_METADATA_WRITE_STRATEGY
@@ -2776,7 +2776,7 @@ H5AC_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] =
/* double flash_multiple = */ 2.0f,
/* double flash_threshold = */ 0.5f,
/* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__age_out_with_threshold,
- /* double upper_hr_threshold = */ 1.00000001,
+ /* double upper_hr_threshold = */ H5_DOUBLE(1.00000001),
/* double decrement = */ 0.9f,
/* hbool_t apply_max_decrement = */ TRUE,
/* size_t max_decrement = */ (1 * 1024 * 1024),
diff --git a/test/cache_common.c b/test/cache_common.c
index 7b26714..0231a11 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -5161,7 +5161,7 @@ check_and_validate_cache_hit_rate(hid_t file_id,
} else {
- expected_hit_rate = 0.0;
+ expected_hit_rate = 0.0F;
}
result = H5Fget_mdc_hit_rate(file_id, &hit_rate);
@@ -5171,7 +5171,7 @@ check_and_validate_cache_hit_rate(hid_t file_id,
pass = FALSE;
failure_mssg = "H5Fget_mdc_hit_rate() failed.";
- } else if ( ! DBL_REL_EQUAL(hit_rate, expected_hit_rate, 0.00001) ) {
+ } else if ( ! DBL_REL_EQUAL(hit_rate, expected_hit_rate, 0.00001F) ) {
pass = FALSE;
failure_mssg = "unexpected hit rate.";
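
The hit-rate checks above go through DBL_REL_EQUAL with a small tolerance rather than comparing doubles with ==. A minimal sketch of that kind of relative comparison, assuming a check of the form |a - b| <= eps * |b| (the real macro lives in the HDF5 test headers and may differ in detail):

#include <math.h>
#include <stdio.h>

/* REL_EQUAL is an illustrative relative-tolerance check, not the exact
 * definition of HDF5's DBL_REL_EQUAL. */
#define REL_EQUAL(a, b, eps) (fabs((a) - (b)) <= (eps) * fabs(b))

int main(void)
{
    double hit_rate = 0.4999996;   /* value reported by the cache */
    double expected = 0.5;         /* value the test predicts */

    if (!REL_EQUAL(hit_rate, expected, 0.00001))
        printf("unexpected hit rate.\n");
    else
        printf("hit rate within tolerance\n");
    return 0;
}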
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index a782fb0..d81b7fc 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -1301,17 +1301,17 @@ compare_data(void *src_data, void *dst_data, hbool_t src_subset)
printf(" src={a=%d, b=%d, c=[%d,%d,%d,%d,%d,%d,%d,%d], d=%d, e=%d, f=%f, g=%f, h=[%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f], i=%f, j=%f, k=%f, l=%f, m=%f, n=%f}\n",
s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2],
s_ptr->c[3], s_ptr->c[4], s_ptr->c[5], s_ptr->c[6], s_ptr->c[7],
- s_ptr->d, s_ptr->e, s_ptr->f, s_ptr->g,s_ptr->h[0],s_ptr->h[1],s_ptr->h[2],
- s_ptr->h[3],s_ptr->h[4],s_ptr->h[5],s_ptr->h[6],s_ptr->h[7],s_ptr->h[8],
- s_ptr->h[9],s_ptr->h[10],s_ptr->h[11],s_ptr->h[12],s_ptr->h[13],s_ptr->h[14],
- s_ptr->h[15], s_ptr->i,s_ptr->j,s_ptr->k,s_ptr->l,s_ptr->m,s_ptr->n);
+ s_ptr->d, s_ptr->e, (double)s_ptr->f, (double)s_ptr->g,(double)s_ptr->h[0],(double)s_ptr->h[1],(double)s_ptr->h[2],
+ (double)s_ptr->h[3],(double)s_ptr->h[4],(double)s_ptr->h[5],(double)s_ptr->h[6],(double)s_ptr->h[7],(double)s_ptr->h[8],
+ (double)s_ptr->h[9],(double)s_ptr->h[10],(double)s_ptr->h[11],(double)s_ptr->h[12],(double)s_ptr->h[13],(double)s_ptr->h[14],
+ (double)s_ptr->h[15], (double)s_ptr->i,(double)s_ptr->j,s_ptr->k,s_ptr->l,s_ptr->m,s_ptr->n);
printf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d,%d,%d,%d,%d], d=%d, e=%d, f=%f, g=%f, h=[%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f], i=%f, j=%f, k=%f, l=%f, m=%f, n=%f}\n",
d_ptr->a, d_ptr->b, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2],
d_ptr->c[3], d_ptr->c[4], d_ptr->c[5], d_ptr->c[6], d_ptr->c[7],
- d_ptr->d, d_ptr->e, d_ptr->f, d_ptr->g,d_ptr->h[0],d_ptr->h[1],d_ptr->h[2],
- d_ptr->h[3],d_ptr->h[4],d_ptr->h[5],d_ptr->h[6],d_ptr->h[7],d_ptr->h[8],
- d_ptr->h[9],d_ptr->h[10],d_ptr->h[11],d_ptr->h[12],d_ptr->h[13],
- d_ptr->h[14], d_ptr->h[15], d_ptr->i,d_ptr->j,d_ptr->k,d_ptr->l,
+ d_ptr->d, d_ptr->e, (double)d_ptr->f, (double)d_ptr->g,(double)d_ptr->h[0],(double)d_ptr->h[1],(double)d_ptr->h[2],
+ (double)d_ptr->h[3],(double)d_ptr->h[4],(double)d_ptr->h[5],(double)d_ptr->h[6],(double)d_ptr->h[7],(double)d_ptr->h[8],
+ (double)d_ptr->h[9],(double)d_ptr->h[10],(double)d_ptr->h[11],(double)d_ptr->h[12],(double)d_ptr->h[13],
+ (double)d_ptr->h[14],(double)d_ptr->h[15],(double)d_ptr->i,(double)d_ptr->j,d_ptr->k,d_ptr->l,
d_ptr->m,d_ptr->n);
goto error;
}
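
The printf changes above cast every float member to double before handing it to a %f conversion. Variadic calls promote float arguments to double anyway, so the casts do not change the output; they make the conversion explicit and quiet float-to-double promotion warnings. A tiny sketch of the same pattern:

#include <stdio.h>

int main(void)
{
    float f = 19.103516f;

    /* %f consumes a double; the explicit cast documents the promotion
     * that would otherwise happen implicitly in the variadic call. */
    printf("f=%f\n", (double)f);
    return 0;
}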
diff --git a/test/cross_read.c b/test/cross_read.c
index ba99c9e..35dd2ce 100644
--- a/test/cross_read.c
+++ b/test/cross_read.c
@@ -100,15 +100,15 @@ static int check_data(const char *dsetname, hid_t fid, hbool_t floating_number)
for (j = 0; j < NX; j++) {
for (i = 0; i < NY; i++) {
data_in[j][i] = ((double)(i + j + 1))/3;
- data_out[j][i] = 0;
+ data_out[j][i] = 0.0F;
int_data_in[j][i] = i + j;
int_data_out[j][i] = 0;
}
}
for (i = 0; i < NY; i++) {
- data_in[NX][i] = -2.2;
- data_out[NX][i] = 0;
+ data_in[NX][i] = -2.2F;
+ data_out[NX][i] = 0.0F;
int_data_in[NX][i] = -2;
int_data_out[NX][i] = 0;
@@ -126,7 +126,7 @@ static int check_data(const char *dsetname, hid_t fid, hbool_t floating_number)
/* Check results */
for (j=0; j<(NX+1); j++) {
for (i=0; i<NY; i++) {
- if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001)) {
+ if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001F)) {
if (!nerrors++) {
H5_FAILED();
printf("element [%d][%d] is %g but should have been %g\n",
@@ -213,116 +213,46 @@ static int open_dataset(char *fname)
nerrors += check_data(DATASETNAME1, file, TRUE);
TESTING("dataset of LE FLOAT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME2, file, TRUE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE FLOAT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME3, file, TRUE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE DOUBLE with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME4, file, TRUE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE DOUBLE with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME5, file, TRUE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE CHAR with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME6, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE CHAR with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME7, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE SHORT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME8, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE SHORT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME9, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE INT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME10, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE INT with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME11, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE LONG LONG with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME12, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of BE LONG LONG with scale-offset filter");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
nerrors += check_data(DATASETNAME13, file, FALSE);
-#else /*H5_HAVE_FILTER_SCALEOFFSET*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
TESTING("dataset of LE FLOAT with Fletcher32 filter");
-#ifdef H5_HAVE_FILTER_FLETCHER32
nerrors += check_data(DATASETNAME14, file, TRUE);
-#else /*H5_HAVE_FILTER_FLETCHER32*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_FLETCHER32*/
TESTING("dataset of BE FLOAT with Fletcher32 filter");
-#ifdef H5_HAVE_FILTER_FLETCHER32
nerrors += check_data(DATASETNAME15, file, TRUE);
-#else /*H5_HAVE_FILTER_FLETCHER32*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_FLETCHER32*/
TESTING("dataset of LE FLOAT with Deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
@@ -357,36 +287,16 @@ static int open_dataset(char *fname)
#endif /*H5_HAVE_FILTER_SZIP*/
TESTING("dataset of LE FLOAT with Shuffle filter");
-#ifdef H5_HAVE_FILTER_SHUFFLE
nerrors += check_data(DATASETNAME20, file, TRUE);
-#else /*H5_HAVE_FILTER_SHUFFLE*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SHUFFLE*/
TESTING("dataset of BE FLOAT with Shuffle filter");
-#ifdef H5_HAVE_FILTER_SHUFFLE
nerrors += check_data(DATASETNAME21, file, TRUE);
-#else /*H5_HAVE_FILTER_SHUFFLE*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_SHUFFLE*/
TESTING("dataset of LE FLOAT with Nbit filter");
-#ifdef H5_HAVE_FILTER_NBIT
nerrors += check_data(DATASETNAME22, file, TRUE);
-#else /*H5_HAVE_FILTER_NBIT*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_NBIT*/
TESTING("dataset of BE FLOAT with Nbit filter");
-#ifdef H5_HAVE_FILTER_NBIT
nerrors += check_data(DATASETNAME23, file, TRUE);
-#else /*H5_HAVE_FILTER_NBIT*/
- SKIPPED();
- puts(not_supported);
-#endif /*H5_HAVE_FILTER_NBIT*/
if(H5Fclose(file))
TEST_ERROR
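
The cross_read.c hunks drop the H5_HAVE_FILTER_SCALEOFFSET, FLETCHER32, SHUFFLE, and NBIT guards, since those internal filters are now always compiled into the library; only the optional deflate and szip cases keep their #ifdefs. If a test still needed to guard on a filter, a run-time probe with the public H5Zfilter_avail() call is one option; the sketch below is an assumed usage for illustration, not a change made by this merge:

#include "hdf5.h"
#include <stdio.h>

int main(void)
{
    /* H5Zfilter_avail() returns >0 if the filter is available,
     * 0 if not, and <0 on error. */
    if (H5Zfilter_avail(H5Z_FILTER_SCALEOFFSET) > 0)
        printf("scale-offset filter is available\n");
    else
        printf("scale-offset filter is not available\n");
    return 0;
}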
diff --git a/test/dsets.c b/test/dsets.c
index 3055943..e9dd2ce 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -23,19 +23,19 @@
#include <stdlib.h>
#include <time.h>
+/*
+ * This file needs to access private information from the H5Z package.
+ */
+#define H5Z_PACKAGE
+
+
#include "h5test.h"
#include "H5srcdir.h"
+#include "H5Zpkg.h"
#ifdef H5_HAVE_SZLIB_H
# include "szlib.h"
#endif
-/*
- * This file needs to access private datatypes from the H5Z package.
- */
-#define H5Z_PACKAGE
-#include "H5Zpkg.h"
-
-
const char *FILENAME[] = {
"dataset",
"compact_dataset",
@@ -70,19 +70,17 @@ const char *FILENAME[] = {
#define DSET_CONV_BUF_NAME "conv_buf"
#define DSET_TCONV_NAME "tconv"
#define DSET_DEFLATE_NAME "deflate"
-#ifdef H5_HAVE_FILTER_SZIP
-#define DSET_SZIP_NAME "szip"
-#endif /* H5_HAVE_FILTER_SZIP */
#define DSET_SHUFFLE_NAME "shuffle"
#define DSET_FLETCHER32_NAME "fletcher32"
#define DSET_FLETCHER32_NAME_2 "fletcher32_2"
#define DSET_FLETCHER32_NAME_3 "fletcher32_3"
#define DSET_SHUF_DEF_FLET_NAME "shuffle+deflate+fletcher32"
#define DSET_SHUF_DEF_FLET_NAME_2 "shuffle+deflate+fletcher32_2"
-#if defined H5_HAVE_FILTER_SZIP && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
+#ifdef H5_HAVE_FILTER_SZIP
+#define DSET_SZIP_NAME "szip"
#define DSET_SHUF_SZIP_FLET_NAME "shuffle+szip+fletcher32"
#define DSET_SHUF_SZIP_FLET_NAME_2 "shuffle+szip+fletcher32_2"
-#endif /* defined H5_HAVE_FILTER_SZIP && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32 */
+#endif /* H5_HAVE_FILTER_SZIP */
#define DSET_BOGUS_NAME "bogus"
#define DSET_MISSING_NAME "missing"
@@ -2006,21 +2004,17 @@ test_get_filter_info(void)
/* Verify that each filter is reported as having the right combination
* of encoder and decoder.
*/
-#ifdef H5_HAVE_FILTER_FLETCHER32
if(H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &flags) < 0) TEST_ERROR
if(((flags & H5Z_FILTER_CONFIG_ENCODE_ENABLED) == 0) ||
((flags & H5Z_FILTER_CONFIG_DECODE_ENABLED) == 0))
TEST_ERROR
-#endif
-#ifdef H5_HAVE_FILTER_SHUFFLE
if(H5Zget_filter_info(H5Z_FILTER_SHUFFLE, &flags) < 0) TEST_ERROR
if(((flags & H5Z_FILTER_CONFIG_ENCODE_ENABLED) == 0) ||
((flags & H5Z_FILTER_CONFIG_DECODE_ENABLED) == 0))
TEST_ERROR
-#endif
#ifdef H5_HAVE_FILTER_DEFLATE
if(H5Zget_filter_info(H5Z_FILTER_DEFLATE, &flags) < 0) TEST_ERROR
@@ -2083,10 +2077,8 @@ UNUSED
const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset with null filter */
-#ifdef H5_HAVE_FILTER_FLETCHER32
hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
unsigned data_corrupt[3]; /* position and length of data to be corrupted */
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
#ifdef H5_HAVE_FILTER_DEFLATE
hsize_t deflate_size; /* Size of dataset with deflate filter */
@@ -2098,13 +2090,11 @@ UNUSED
unsigned szip_pixels_per_block=4;
#endif /* H5_HAVE_FILTER_SZIP */
-#ifdef H5_HAVE_FILTER_SHUFFLE
hsize_t shuffle_size; /* Size of dataset with shuffle filter */
-#endif /* H5_HAVE_FILTER_SHUFFLE */
-#if(defined H5_HAVE_FILTER_DEFLATE | defined H5_HAVE_FILTER_SZIP) && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
- hsize_t combo_size; /* Size of dataset with shuffle+deflate filter */
-#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
+#if(defined H5_HAVE_FILTER_DEFLATE | defined H5_HAVE_FILTER_SZIP)
+ hsize_t combo_size; /* Size of dataset with multiple filters */
+#endif /* defined H5_HAVE_FILTER_DEFLATE | defined H5_HAVE_FILTER_SZIP */
/* test the H5Zget_filter_info function */
if(test_get_filter_info() < 0) goto error;
@@ -2128,7 +2118,6 @@ UNUSED
* STEP 1: Test Fletcher32 Checksum by itself.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_FLETCHER32
puts("Testing Fletcher32 checksum(enabled for read)");
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
@@ -2169,11 +2158,6 @@ UNUSED
/* Clean up objects used for this test */
if(H5Pclose (dc) < 0) goto error;
-#else /* H5_HAVE_FILTER_FLETCHER32 */
- TESTING("fletcher32 checksum");
- SKIPPED();
- puts(" Fletcher32 checksum not enabled");
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
/*----------------------------------------------------------
* STEP 2: Test deflation by itself.
@@ -2231,7 +2215,6 @@ UNUSED
* STEP 4: Test shuffling by itself.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_SHUFFLE
puts("Testing shuffle filter");
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
@@ -2246,17 +2229,12 @@ UNUSED
/* Clean up objects used for this test */
if(H5Pclose (dc) < 0) goto error;
-#else /* H5_HAVE_FILTER_SHUFFLE */
- TESTING("shuffle filter");
- SKIPPED();
- puts(" Shuffle filter not enabled");
-#endif /* H5_HAVE_FILTER_SHUFFLE */
/*----------------------------------------------------------
* STEP 5: Test shuffle + deflate + checksum in any order.
*----------------------------------------------------------
*/
-#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
+#ifdef H5_HAVE_FILTER_DEFLATE
puts("Testing shuffle+deflate+checksum filters(checksum first)");
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
@@ -2280,17 +2258,17 @@ UNUSED
/* Clean up objects used for this test */
if(H5Pclose (dc) < 0) goto error;
-#else /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
+#else /* H5_HAVE_FILTER_DEFLATE */
TESTING("shuffle+deflate+fletcher32 filters");
SKIPPED();
- puts(" Deflate, shuffle, or fletcher32 checksum filter not enabled");
-#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
+ puts(" Deflate filter not enabled");
+#endif /* H5_HAVE_FILTER_DEFLATE */
/*----------------------------------------------------------
* STEP 6: Test shuffle + szip + checksum in any order.
*----------------------------------------------------------
*/
-#if defined H5_HAVE_FILTER_SZIP && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
+#ifdef H5_HAVE_FILTER_SZIP
TESTING("shuffle+szip+checksum filters(checksum first, with encoder)");
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
@@ -2339,11 +2317,11 @@ UNUSED
SKIPPED();
}
-#else /* H5_HAVE_FILTER_SZIP && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
+#else /* H5_HAVE_FILTER_SZIP */
TESTING("shuffle+szip+fletcher32 filters");
SKIPPED();
- puts(" Szip, shuffle, or fletcher32 checksum filter not enabled");
-#endif /* H5_HAVE_FILTER_SZIP && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
+ puts(" szip filter not enabled");
+#endif /* H5_HAVE_FILTER_SZIP */
return 0;
error:
@@ -2610,20 +2588,15 @@ error:
static herr_t
test_onebyte_shuffle(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SHUFFLE
hid_t dataset, space,dc;
const hsize_t size[2] = {10, 20};
const hsize_t chunk_size[2] = {10, 20};
unsigned char orig_data[10][20];
unsigned char new_data[10][20];
size_t i, j;
-#else /* H5_HAVE_FILTER_SHUFFLE */
- const char *not_supported= " Data shuffling is not enabled.";
-#endif /* H5_HAVE_FILTER_SHUFFLE */
TESTING("8-bit shuffling (setup)");
-#ifdef H5_HAVE_FILTER_SHUFFLE
/* Create the data space */
if((space = H5Screate_simple(2, size, NULL)) < 0) goto error;
@@ -2641,10 +2614,6 @@ test_onebyte_shuffle(hid_t file)
orig_data[i][j] = (unsigned char)HDrandom();
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test shuffling by setting up a chunked dataset and writing
@@ -2653,16 +2622,11 @@ test_onebyte_shuffle(hid_t file)
*/
TESTING("8-bit shuffling (write)");
-#ifdef H5_HAVE_FILTER_SHUFFLE
if(H5Dwrite(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -2670,7 +2634,6 @@ test_onebyte_shuffle(hid_t file)
*/
TESTING("8-bit shuffling (read)");
-#ifdef H5_HAVE_FILTER_SHUFFLE
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -2697,10 +2660,6 @@ test_onebyte_shuffle(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
return 0;
@@ -2726,7 +2685,6 @@ error:
static herr_t
test_nbit_int(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataset, datatype, mem_datatype, space, dc;
hsize_t size[2] = {2, 5};
hsize_t chunk_size[2] = {2,5};
@@ -2735,13 +2693,10 @@ test_nbit_int(hid_t file)
unsigned int mask;
size_t precision, offset;
size_t i, j;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
puts("Testing nbit filter");
TESTING(" nbit int (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define dataset datatype (integer), and set precision, offset */
datatype = H5Tcopy(H5T_NATIVE_INT);
precision = 17; /* precision includes sign bit */
@@ -2779,10 +2734,6 @@ test_nbit_int(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -2791,15 +2742,10 @@ test_nbit_int(hid_t file)
*/
TESTING(" nbit int (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, mem_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -2807,7 +2753,6 @@ test_nbit_int(hid_t file)
*/
TESTING(" nbit int (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, mem_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -2839,10 +2784,7 @@ test_nbit_int(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -2866,24 +2808,20 @@ error:
static herr_t
test_nbit_float(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataset, datatype, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2, 5};
/* orig_data[] are initialized to be within the range that can be represented by
* dataset datatype (no precision loss during datatype conversion)
*/
- float orig_data[2][5] = {{(float)188384.00f, (float)19.103516f, (float)-1.0831790e9f, (float)-84.242188f,
- (float)5.2045898f}, {(float)-49140.000f, (float)2350.2500f, (float)-3.2110596e-1f, (float)6.4998865e-5f, (float)-0.0000000f}};
+ float orig_data[2][5] = {{188384.0f, 19.103516f, -1.0831790e9f, -84.242188f, 5.2045898f},
+ {-49140.0f, 2350.25f, -3.2110596e-1f, 6.4998865e-5f, -0.0f}};
float new_data[2][5];
size_t precision, offset;
size_t i, j;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit float (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define user-defined single-precision floating-point type for dataset */
datatype = H5Tcopy(H5T_IEEE_F32BE);
if(H5Tset_fields(datatype, (size_t)26, (size_t)20, (size_t)6, (size_t)7, (size_t)13) < 0) goto error;
@@ -2906,10 +2844,6 @@ test_nbit_float(hid_t file)
if((dataset = H5Dcreate2(file, DSET_NBIT_FLOAT_NAME, datatype,
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -2918,16 +2852,11 @@ test_nbit_float(hid_t file)
*/
TESTING(" nbit float (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -2935,7 +2864,6 @@ test_nbit_float(hid_t file)
*/
TESTING(" nbit float (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -2966,10 +2894,6 @@ test_nbit_float(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
return 0;
@@ -2996,25 +2920,33 @@ static herr_t
test_nbit_double(hid_t file)
{
/* assume unsigned int and float has the same number of bytes */
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataset, datatype, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2, 5};
/* orig_data[] are initialized to be within the range that can be represented by
* dataset datatype (no precision loss during datatype conversion)
*/
- double orig_data[2][5] = {{1.6081706885101836e+60, -255.32099170994480,
- 1.2677579992621376e-61, 64568.289448797700, -1.0619721778839084e-75}, {2.1499497833454840e+56,
- 6.6562295504670740e-3, -1.5747263393432150, 1.0711093225222612, -9.8971679387636870e-1}};
+ double orig_data[2][5] = {
+ {
+ H5_DOUBLE(1.6081706885101836e+60),
+ H5_DOUBLE(-255.32099170994480),
+ H5_DOUBLE(1.2677579992621376e-61),
+ H5_DOUBLE(64568.289448797700),
+ H5_DOUBLE(-1.0619721778839084e-75)
+ },
+ {
+ H5_DOUBLE(2.1499497833454840e+56),
+ H5_DOUBLE(6.6562295504670740e-3),
+ H5_DOUBLE(-1.5747263393432150),
+ H5_DOUBLE(1.0711093225222612),
+ H5_DOUBLE(-9.8971679387636870e-1)
+ }};
double new_data[2][5];
size_t precision, offset;
size_t i, j;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit double (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define user-defined doule-precision floating-point type for dataset */
datatype = H5Tcopy(H5T_IEEE_F64BE);
if(H5Tset_fields(datatype, (size_t)55, (size_t)46, (size_t)9, (size_t)5, (size_t)41) < 0) goto error;
@@ -3038,10 +2970,6 @@ test_nbit_double(hid_t file)
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -3050,15 +2978,10 @@ test_nbit_double(hid_t file)
*/
TESTING(" nbit double (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -3066,7 +2989,6 @@ test_nbit_double(hid_t file)
*/
TESTING(" nbit double (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -3097,10 +3019,6 @@ test_nbit_double(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
return 0;
@@ -3126,7 +3044,6 @@ error:
static herr_t
test_nbit_array(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataset, base_datatype, array_datatype, space, dc;
hid_t mem_base_datatype, mem_array_datatype;
const hsize_t size[2] = {2, 5};
@@ -3136,12 +3053,9 @@ test_nbit_array(hid_t file)
unsigned int new_data[2][5][3][2];
size_t precision, offset;
size_t i, j, m, n;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit array (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define dataset array datatype's base datatype and set precision, offset */
base_datatype = H5Tcopy(H5T_NATIVE_UINT);
precision = 22;
@@ -3179,12 +3093,8 @@ test_nbit_array(hid_t file)
for(m = 0; m < (size_t)adims[0]; m++)
for(n = 0; n < (size_t)adims[1]; n++)
orig_data[i][j][m][n] = (unsigned int)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)precision)) << offset);
+ (long long)HDpow(2.0F, (double)precision)) << offset);
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -3193,16 +3103,11 @@ test_nbit_array(hid_t file)
*/
TESTING(" nbit array (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, mem_array_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -3210,7 +3115,6 @@ test_nbit_array(hid_t file)
*/
TESTING(" nbit array (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, mem_array_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -3244,10 +3148,7 @@ test_nbit_array(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
@@ -3272,7 +3173,6 @@ error:
static herr_t
test_nbit_compound(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
typedef struct { /* Struct with atomic fields */
int i;
char c;
@@ -3287,19 +3187,16 @@ test_nbit_compound(hid_t file)
hid_t dataset, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2, 5};
- const float float_val[2][5] = {{(float)188384.00, (float)19.103516, (float)-1.0831790e9, (float)-84.242188,
- (float)5.2045898}, {(float)-49140.000, (float)2350.2500, (float)-3.2110596e-1, (float)6.4998865e-5, (float)-0.0000000}};
+ const float float_val[2][5] = {{188384.0F, 19.103516F, -1.0831790e9F, -84.242188F, 5.2045898F},
+ {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}};
atomic orig_data[2][5];
atomic new_data[2][5];
unsigned int i_mask, s_mask, c_mask;
size_t i, j;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit compound (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define datatypes of members of compound datatype */
i_tid=H5Tcopy(H5T_NATIVE_INT);
c_tid=H5Tcopy(H5T_NATIVE_CHAR);
@@ -3355,11 +3252,11 @@ test_nbit_compound(hid_t file)
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
orig_data[i][j].i = (int)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ (long long)HDpow(2.0F, (double)(precision[0]-1))) << offset[0]);
orig_data[i][j].c = (char)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ (long long)HDpow(2.0F, (double)(precision[1]-1))) << offset[1]);
orig_data[i][j].s = (short)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ (long long)HDpow(2.0F, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].f = float_val[i][j];
/* some even-numbered integer values are negtive */
@@ -3370,10 +3267,6 @@ test_nbit_compound(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -3382,15 +3275,10 @@ test_nbit_compound(hid_t file)
*/
TESTING(" nbit compound (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, mem_cmpd_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -3398,7 +3286,6 @@ test_nbit_compound(hid_t file)
*/
TESTING(" nbit compound (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, mem_cmpd_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -3440,10 +3327,7 @@ test_nbit_compound(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
@@ -3468,7 +3352,6 @@ error:
static herr_t
test_nbit_compound_2(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
typedef struct { /* Struct with atomic fields */
int i;
char c;
@@ -3498,19 +3381,16 @@ test_nbit_compound_2(hid_t file)
hid_t dataset, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2, 5};
- const float float_val[2][5] = {{(float)188384.00, (float)19.103516, (float)-1.0831790e9, (float)-84.242188,
- (float)5.2045898}, {(float)-49140.000, (float)2350.2500, (float)-3.2110596e-1, (float)6.4998865e-5, (float)-0.0000000}};
+ const float float_val[2][5] = {{188384.0F, 19.103516F, -1.0831790e9F, -84.242188F, 5.2045898F},
+ {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}};
complex orig_data[2][5];
complex new_data[2][5];
unsigned int i_mask, s_mask, c_mask, b_mask;
size_t i, j, m, n, b_failed, d_failed;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit compound complex (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
+
/* Define datatypes of members of compound datatype */
i_tid=H5Tcopy(H5T_NATIVE_INT);
c_tid=H5Tcopy(H5T_NATIVE_CHAR);
@@ -3598,38 +3478,34 @@ test_nbit_compound_2(hid_t file)
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
orig_data[i][j].a.i = (int)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ (long long)HDpow(2.0F, (double)(precision[0]-1))) << offset[0]);
orig_data[i][j].a.c = (char)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ (long long)HDpow(2.0F, (double)(precision[1]-1))) << offset[1]);
orig_data[i][j].a.s = (short)(-((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ (long long)HDpow(2.0F, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].a.f = float_val[i][j];
orig_data[i][j].v = (unsigned int)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)precision[3])) << offset[3]);
+ (long long)HDpow(2.0F, (double)precision[3])) << offset[3]);
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++)
orig_data[i][j].b[m][n] = (char)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[4]-1))) << offset[4]);
+ (long long)HDpow(2.0F, (double)(precision[4]-1))) << offset[4]);
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++) {
orig_data[i][j].d[m][n].i = (int)(-((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ (long long)HDpow(2.0F, (double)(precision[0]-1))) << offset[0]);
orig_data[i][j].d[m][n].c = (char)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ (long long)HDpow(2.0F, (double)(precision[1]-1))) << offset[1]);
orig_data[i][j].d[m][n].s = (short)(((long long)HDrandom() %
- (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ (long long)HDpow(2.0F, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].d[m][n].f = float_val[i][j];
}
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -3638,15 +3514,10 @@ test_nbit_compound_2(hid_t file)
*/
TESTING(" nbit compound complex (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, mem_cmpd_tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -3654,7 +3525,6 @@ test_nbit_compound_2(hid_t file)
*/
TESTING(" nbit compound complex (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, mem_cmpd_tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -3738,10 +3608,7 @@ test_nbit_compound_2(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
@@ -3766,7 +3633,6 @@ error:
static herr_t
test_nbit_compound_3(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
typedef struct { /* Struct with some no-op type fields */
int i; /* integer field, NOT a no-op type */
char str[30]; /* fixed-length string, no-op type */
@@ -3784,12 +3650,8 @@ test_nbit_compound_3(hid_t file)
atomic new_data[5];
size_t i, k, j;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit compound with no-op type (setup)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Define datatypes of members of compound datatype */
i_tid=H5Tcopy(H5T_NATIVE_INT);
@@ -3834,7 +3696,7 @@ test_nbit_compound_3(hid_t file)
/* Initialize data */
for(i = 0; i < (size_t)size[0]; i++) {
HDmemset(&orig_data[i], 0, sizeof(orig_data[i]));
- orig_data[i].i = HDrandom() % (long)HDpow(2.0, 17.0 - 1.0);
+ orig_data[i].i = HDrandom() % (long)HDpow(2.0F, 17.0F - 1.0F);
HDstrcpy(orig_data[i].str, "fixed-length C string");
orig_data[i].vl_str = HDstrdup("variable-length C string");
@@ -3849,10 +3711,6 @@ test_nbit_compound_3(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test nbit by setting up a chunked dataset and writing
@@ -3861,15 +3719,10 @@ test_nbit_compound_3(hid_t file)
*/
TESTING(" nbit compound with no-op type (write)");
-#ifdef H5_HAVE_FILTER_NBIT
if(H5Dwrite(dataset, cmpd_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0)
goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -3877,7 +3730,6 @@ test_nbit_compound_3(hid_t file)
*/
TESTING(" nbit compound with no-op type (read)");
-#ifdef H5_HAVE_FILTER_NBIT
/* Read the dataset back */
if(H5Dread(dataset, cmpd_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0)
@@ -3934,10 +3786,7 @@ test_nbit_compound_3(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
@@ -3962,19 +3811,14 @@ error:
static herr_t
test_nbit_int_size(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataspace, dataset, datatype, mem_datatype, dset_create_props;
hsize_t dims[2], chunk_size[2];
hsize_t dset_size = 0;
int orig_data[DSET_DIM1][DSET_DIM2];
int i, j;
size_t precision, offset;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit integer dataset size");
-#ifdef H5_HAVE_FILTER_NBIT
/* Define dataset datatype (integer), and set precision, offset */
if((datatype = H5Tcopy(H5T_NATIVE_INT)) < 0) {
@@ -4107,10 +3951,6 @@ test_nbit_int_size(hid_t file)
H5Pclose (dset_create_props);
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
return 0;
error:
@@ -4136,7 +3976,6 @@ error:
static herr_t
test_nbit_flt_size(hid_t file)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataspace, dataset, datatype, dset_create_props;
hsize_t dims[2], chunk_size[2];
hsize_t dset_size = 0;
@@ -4144,12 +3983,8 @@ test_nbit_flt_size(hid_t file)
int i, j;
size_t precision, offset;
size_t spos, epos, esize, mpos, msize;
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= " Nbit is not enabled.";
-#endif /* H5_HAVE_FILTER_NBIT */
TESTING(" nbit floating-number dataset size");
-#ifdef H5_HAVE_FILTER_NBIT
/* Define floating-point type for dataset
*-------------------------------------------------------------------
@@ -4313,10 +4148,6 @@ test_nbit_flt_size(hid_t file)
H5Pclose (dset_create_props);
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
return 0;
error:
@@ -4341,20 +4172,16 @@ error:
static herr_t
test_scaleoffset_int(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
int orig_data[2][5];
int new_data[2][5];
size_t i, j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
puts("Testing scaleoffset filter");
TESTING(" scaleoffset int without fill value (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_INT);
/* Set order of dataset datatype */
@@ -4388,10 +4215,6 @@ test_scaleoffset_int(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -4400,14 +4223,9 @@ test_scaleoffset_int(hid_t file)
*/
TESTING(" scaleoffset int without fill value (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -4415,7 +4233,6 @@ test_scaleoffset_int(hid_t file)
*/
TESTING(" scaleoffset int without fill value (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0) goto error;
@@ -4442,10 +4259,7 @@ test_scaleoffset_int(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -4470,7 +4284,6 @@ error:
static herr_t
test_scaleoffset_int_2(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, mspace, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
@@ -4482,12 +4295,9 @@ test_scaleoffset_int_2(hid_t file)
hsize_t block[2]; /* Block sizes */
int fillval;
size_t j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
TESTING(" scaleoffset int with fill value (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_INT);
/* Set order of dataset datatype */
@@ -4534,10 +4344,6 @@ test_scaleoffset_int_2(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -4546,15 +4352,10 @@ test_scaleoffset_int_2(hid_t file)
*/
TESTING(" scaleoffset int with fill value (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* only data in the hyperslab will be written, other value should be fill value */
if(H5Dwrite(dataset, H5T_NATIVE_INT, mspace, mspace, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -4562,7 +4363,6 @@ test_scaleoffset_int_2(hid_t file)
*/
TESTING(" scaleoffset int with fill value (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_INT, mspace, mspace, H5P_DEFAULT,
new_data) < 0) goto error;
@@ -4587,10 +4387,7 @@ test_scaleoffset_int_2(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -4615,19 +4412,15 @@ error:
static herr_t
test_scaleoffset_float(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
float orig_data[2][5];
float new_data[2][5];
size_t i, j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
TESTING(" scaleoffset float without fill value, D-scaling (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_FLOAT);
/* Set order of dataset datatype */
@@ -4655,7 +4448,7 @@ test_scaleoffset_float(hid_t file)
/* Initialize data */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[i][j] = (float)((HDrandom() % 100000) / (float)1000.0);
+ orig_data[i][j] = (float)((HDrandom() % 100000) / 1000.0F);
/* even-numbered values are negtive */
if((i*size[1]+j+1)%2 == 0)
@@ -4663,10 +4456,6 @@ test_scaleoffset_float(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -4675,14 +4464,9 @@ test_scaleoffset_float(hid_t file)
*/
TESTING(" scaleoffset float without fill value, D-scaling (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -4690,7 +4474,6 @@ test_scaleoffset_float(hid_t file)
*/
TESTING(" scaleoffset float without fill value, D-scaling (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0) goto error;
@@ -4698,7 +4481,7 @@ test_scaleoffset_float(hid_t file)
/* Check that the values read are the same as the values written */
for(i=0; i<(size_t)size[0]; i++) {
for(j=0; j<(size_t)size[1]; j++) {
- if(HDfabs(new_data[i][j]-orig_data[i][j]) > HDpow(10.0, -3.0)) {
+ if(HDfabs(new_data[i][j]-orig_data[i][j]) > HDpow(10.0F, -3.0F)) {
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
@@ -4717,10 +4500,7 @@ test_scaleoffset_float(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -4745,7 +4525,6 @@ error:
static herr_t
test_scaleoffset_float_2(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, mspace, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
@@ -4757,12 +4536,9 @@ test_scaleoffset_float_2(hid_t file)
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
size_t j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
TESTING(" scaleoffset float with fill value, D-scaling (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_FLOAT);
/* Set order of dataset datatype */
@@ -4775,7 +4551,7 @@ test_scaleoffset_float_2(hid_t file)
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
/* Set fill value */
- fillval = 10000.0;
+ fillval = 10000.0F;
if(H5Pset_fill_value(dc, H5T_NATIVE_FLOAT, &fillval) < 0) goto error;
/* Set up to use scaleoffset filter, decimal scale factor is 3,
@@ -4803,7 +4579,7 @@ test_scaleoffset_float_2(hid_t file)
/* Initialize data of hyperslab */
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[0][j] = (float)((HDrandom() % 100000) / (float)1000.0);
+ orig_data[0][j] = (float)((HDrandom() % 100000) / 1000.0F);
/* even-numbered values are negtive */
if((j+1)%2 == 0)
@@ -4811,10 +4587,6 @@ test_scaleoffset_float_2(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -4823,15 +4595,10 @@ test_scaleoffset_float_2(hid_t file)
*/
TESTING(" scaleoffset float with fill value, D-scaling (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* only data in the hyperslab will be written, other value should be fill value */
if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, mspace, mspace, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -4839,14 +4606,13 @@ test_scaleoffset_float_2(hid_t file)
*/
TESTING(" scaleoffset float with fill value, D-scaling (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_FLOAT, mspace, mspace, H5P_DEFAULT,
new_data) < 0) goto error;
/* Check that the values read are the same as the values written */
for(j=0; j<(size_t)size[1]; j++) {
- if(HDfabs(new_data[0][j]-orig_data[0][j]) > HDpow(10.0, -3.0)) {
+ if(HDfabs(new_data[0][j]-orig_data[0][j]) > HDpow(10.0F, -3.0F)) {
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %lu,%lu\n", (unsigned long)0, (unsigned long)j);
@@ -4863,10 +4629,7 @@ test_scaleoffset_float_2(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -4891,19 +4654,15 @@ error:
static herr_t
test_scaleoffset_double(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
double orig_data[2][5];
double new_data[2][5];
size_t i, j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
TESTING(" scaleoffset double without fill value, D-scaling (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_DOUBLE);
/* Set order of dataset datatype */
@@ -4931,7 +4690,7 @@ test_scaleoffset_double(hid_t file)
/* Initialize data */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[i][j] = (HDrandom() % 10000000) / 10000000.0;
+ orig_data[i][j] = (HDrandom() % 10000000) / 10000000.0F;
      /* even-numbered values are negative */
if((i*size[1]+j+1)%2 == 0)
@@ -4939,10 +4698,6 @@ test_scaleoffset_double(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -4951,14 +4706,9 @@ test_scaleoffset_double(hid_t file)
*/
TESTING(" scaleoffset double without fill value, D-scaling (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
if(H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -4966,7 +4716,6 @@ test_scaleoffset_double(hid_t file)
*/
TESTING(" scaleoffset double without fill value, D-scaling (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
new_data) < 0) goto error;
@@ -4974,7 +4723,7 @@ test_scaleoffset_double(hid_t file)
/* Check that the values read are the same as the values written */
for(i=0; i<(size_t)size[0]; i++) {
for(j=0; j<(size_t)size[1]; j++) {
- if(HDfabs(new_data[i][j]-orig_data[i][j]) > HDpow(10.0, -7.0)) {
+ if(HDfabs(new_data[i][j]-orig_data[i][j]) > HDpow(10.0F, -7.0F)) {
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
@@ -4993,10 +4742,7 @@ test_scaleoffset_double(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -5021,7 +4767,6 @@ error:
static herr_t
test_scaleoffset_double_2(hid_t file)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset, datatype, space, mspace, dc;
const hsize_t size[2] = {2, 5};
const hsize_t chunk_size[2] = {2,5};
@@ -5033,12 +4778,9 @@ test_scaleoffset_double_2(hid_t file)
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
size_t j;
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= " Scaleoffset is not enabled.";
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
TESTING(" scaleoffset double with fill value, D-scaling (setup)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
+
datatype = H5Tcopy(H5T_NATIVE_DOUBLE);
/* Set order of dataset datatype */
@@ -5051,7 +4793,7 @@ test_scaleoffset_double_2(hid_t file)
if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
/* Set fill value */
- fillval = 10000.0;
+ fillval = 10000.0F;
if(H5Pset_fill_value(dc, H5T_NATIVE_DOUBLE, &fillval) < 0) goto error;
/* Set up to use scaleoffset filter, decimal scale factor is 7,
@@ -5079,7 +4821,7 @@ test_scaleoffset_double_2(hid_t file)
/* Initialize data of hyperslab */
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[0][j] = (HDrandom() % 10000000) / 10000000.0;
+ orig_data[0][j] = (HDrandom() % 10000000) / 10000000.0F;
      /* even-numbered values are negative */
if((j+1)%2 == 0)
@@ -5087,10 +4829,6 @@ test_scaleoffset_double_2(hid_t file)
}
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 1: Test scaleoffset by setting up a chunked dataset and writing
@@ -5099,15 +4837,10 @@ test_scaleoffset_double_2(hid_t file)
*/
TESTING(" scaleoffset double with fill value, D-scaling (write)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* only data in the hyperslab will be written, other value should be fill value */
if(H5Dwrite(dataset, H5T_NATIVE_DOUBLE, mspace, mspace, H5P_DEFAULT,
orig_data) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
/*----------------------------------------------------------------------
* STEP 2: Try to read the data we just wrote.
@@ -5115,14 +4848,13 @@ test_scaleoffset_double_2(hid_t file)
*/
TESTING(" scaleoffset double with fill value, D-scaling (read)");
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
/* Read the dataset back */
if(H5Dread(dataset, H5T_NATIVE_DOUBLE, mspace, mspace, H5P_DEFAULT,
new_data) < 0) goto error;
/* Check that the values read are the same as the values written */
for(j=0; j<(size_t)size[1]; j++) {
- if(HDfabs(new_data[0][j]-orig_data[0][j]) > HDpow(10.0, -7.0)) {
+ if(HDfabs(new_data[0][j]-orig_data[0][j]) > HDpow(10.0F, -7.0F)) {
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %lu,%lu\n", (unsigned long)0, (unsigned long)j);
@@ -5140,10 +4872,7 @@ test_scaleoffset_double_2(hid_t file)
if(H5Dclose(dataset) < 0) goto error;
PASSED();
-#else
- SKIPPED();
- puts(not_supported);
-#endif
+
return 0;
error:
return -1;
@@ -5892,11 +5621,11 @@ test_set_local(hid_t fapl)
h5_fixname(FILENAME[5], fapl, filename, sizeof filename);
/* Initialize the integer & floating-point dataset */
- n=1.0;
+ n=1.0F;
for(i = 0; i < DSET_DIM1; i++)
for(j = 0; j < DSET_DIM2; j++) {
points[i][j] = (int)n++;
- points_dbl[i][j] = (double)1.5*n++;
+ points_dbl[i][j] = (double)1.5F*n++;
}
/* Open file */
@@ -6090,7 +5819,7 @@ test_set_local(hid_t fapl)
for(j=0; j<dims[1]; j++) {
/* If the difference between two values is greater than 0.001%, they're
* considered not equal. */
- if(!DBL_REL_EQUAL(points_dbl[i][j],check_dbl[i][j],0.00001)) {
+ if(!DBL_REL_EQUAL(points_dbl[i][j],check_dbl[i][j],0.00001F)) {
H5_FAILED();
printf(" Line %d: Read different values than written.\n",__LINE__);
printf(" At index %lu,%lu\n", (unsigned long)(i), (unsigned long)(j));
@@ -6371,7 +6100,7 @@ test_filter_delete(hid_t file)
TESTING("filter deletion");
-#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
+#ifdef H5_HAVE_FILTER_DEFLATE
/* create the data space */
if((sid = H5Screate_simple(2, dims, NULL)) < 0) goto error;
@@ -6576,7 +6305,6 @@ test_filters_endianess(void)
TESTING("filters with big-endian/little-endian data");
-#if defined H5_HAVE_FILTER_FLETCHER32
/*-------------------------------------------------------------------------
* step 1: open a file written on a little-endian machine
*-------------------------------------------------------------------------
@@ -6609,9 +6337,7 @@ test_filters_endianess(void)
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
PASSED();
-#else
- SKIPPED();
-#endif
+
return 0;
error:
@@ -7416,7 +7142,7 @@ test_chunk_cache(hid_t fapl)
/* Set new rdcc settings on fapl */
nslots_2 = nslots_1 * 2;
nbytes_2 = nbytes_1 * 2;
- w0_2 = w0_1 / 2.;
+ w0_2 = w0_1 / 2.0F;
if (H5Pset_cache(fapl_local, 0, nslots_2, nbytes_2, w0_2) < 0) FAIL_STACK_ERROR
h5_fixname(FILENAME[8], fapl, filename, sizeof filename);
@@ -7611,7 +7337,7 @@ test_big_chunks_bypass_cache(hid_t fapl)
/* Define cache size to be smaller than chunk size */
rdcc_nelmts = BYPASS_CHUNK_DIM/5;
rdcc_nbytes = sizeof(int)*BYPASS_CHUNK_DIM/5;
- if(H5Pset_cache(fapl_local, 0, rdcc_nelmts, rdcc_nbytes, (double)0.0) < 0) FAIL_STACK_ERROR
+ if(H5Pset_cache(fapl_local, 0, rdcc_nelmts, rdcc_nbytes, 0.0F) < 0) FAIL_STACK_ERROR
/* Create file */
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_local)) < 0) FAIL_STACK_ERROR
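The dsets.c hunks above follow two recurring patterns: the H5_HAVE_FILTER_SCALEOFFSET guards and their SKIPPED branches are removed, and bare floating-point literals gain an F suffix so float expressions stay in single precision instead of being promoted to double and narrowed back. A minimal standalone sketch of the second point, using plain libm calls in place of the HD* wrappers the tests use (illustrative only, not part of this patch):

    /* Illustrative only -- not from the patch.  Without the 'F'/'f' suffix the
     * float operand is promoted to double and the assignment narrows back,
     * which compilers flag with -Wconversion / -Wdouble-promotion. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        float a = 123.456f;
        float b = 123.457f;

        /* Mixed float/double arithmetic: 'a' is promoted to double here. */
        double promoted = a / 1000.0;

        /* All-float arithmetic: no promotion, no narrowing warning. */
        float pure = a / 1000.0f;

        /* Tolerance check in the same style the tests use. */
        if (fabsf(a - b) > powf(10.0f, -3.0f))
            printf("values differ by more than 1e-3\n");
        else
            printf("values agree to 1e-3 (%g vs %g)\n", (double)promoted, (double)pure);

        return 0;
    }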
diff --git a/test/dt_arith.c b/test/dt_arith.c
index a93d1b8..c5b72d4 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -28,7 +28,7 @@
#define NTESTELEM 10000
/* Epsilon for floating-point comparisons */
-#define FP_EPSILON 0.000001
+#define FP_EPSILON 0.000001F
/*
 * Offset from aligned memory returned by malloc(). This can be used to test
@@ -538,7 +538,7 @@ some_dummy_func(float x)
{
char s[128];
- HDsnprintf(s, sizeof(s), "%g", x);
+ HDsnprintf(s, sizeof(s), "%g", (double)x);
}
@@ -832,7 +832,7 @@ static int test_particular_fp_integer(void)
printf(" %02x", saved_buf2[ENDIAN(src_size2, j, endian)]);
HDmemcpy(&x, saved_buf2, src_size2);
- printf(" %29.20e\n", x);
+ printf(" %29.20e\n", (double)x);
printf(" dst = ");
for (j=0; j<dst_size2; j++)
@@ -2720,7 +2720,7 @@ my_isnan(dtype_t type, void *val)
float x;
HDmemcpy(&x, val, sizeof(float));
- HDsnprintf(s, sizeof(s), "%g", x);
+ HDsnprintf(s, sizeof(s), "%g", (double)x);
} else if (FLT_DOUBLE==type) {
double x;
@@ -3294,12 +3294,11 @@ test_conv_flt_1 (const char *name, int run_test, hid_t src, hid_t dst)
check_mant[1] = HDfrexpl(hw_ld, check_expo+1);
#endif
}
-#ifdef H5_CONVERT_DENORMAL_FLOAT
/* Special check for denormalized values */
if(check_expo[0]<(-(int)dst_ebias) || check_expo[1]<(-(int)dst_ebias)) {
int expo_diff=check_expo[0]-check_expo[1];
int valid_bits=(int)((dst_ebias+dst_msize)+MIN(check_expo[0],check_expo[1]))-1;
- double epsilon=1.0;
+ double epsilon=1.0F;
/* Re-scale the mantissas based on any exponent difference */
if(expo_diff!=0)
@@ -3317,58 +3316,6 @@ test_conv_flt_1 (const char *name, int run_test, hid_t src, hid_t dst)
HDfabs(check_mant[0]-check_mant[1])<FP_EPSILON)
continue;
} /* end else */
-#else /* H5_CONVERT_DENORMAL_FLOAT */
- {
- hssize_t expo; /*exponent */
- uint8_t tmp[32];
-
- assert(src_size<=sizeof(tmp));
- if(sendian==H5T_ORDER_LE)
- HDmemcpy(tmp,&saved[j*src_size],src_size);
- else if(sendian==H5T_ORDER_BE)
- for (k=0; k<src_size; k++)
- tmp[k]=saved[j*src_size+(src_size-(k+1))];
- else {
- for (k = 0; k < src_size; k += 4) {
- tmp[k] = saved[j*src_size+(src_size-2)-k];
- tmp[k+1] = saved[j*src_size+(src_size-1)-k];
-
- tmp[(src_size-2)-k] = saved[j*src_size+k];
- tmp[(src_size-1)-k] = saved[j*src_size+k+1];
- }
- }
-
- expo = H5T__bit_get_d(tmp, src_epos, src_esize);
- if(expo==0)
- continue; /* Denormalized floating-point value detected */
- else {
- assert(dst_size<=sizeof(tmp));
- if(sendian==H5T_ORDER_LE)
- HDmemcpy(tmp,&buf[j*dst_size],dst_size);
- else if(sendian==H5T_ORDER_BE)
- for (k=0; k<dst_size; k++)
- tmp[k]=buf[j*dst_size+(dst_size-(k+1))];
- else {
- for (k = 0; k < src_size; k += 4) {
- tmp[k] = buf[j*dst_size+(dst_size-2)-k];
- tmp[k+1] = buf[j*dst_size+(dst_size-1)-k];
-
- tmp[(dst_size-2)-k] = buf[j*dst_size+k];
- tmp[(dst_size-1)-k] = buf[j*dst_size+k+1];
- }
- }
-
- expo = H5T__bit_get_d(tmp, dst_epos, dst_esize);
- if(expo==0)
- continue; /* Denormalized floating-point value detected */
- else {
- if (check_expo[0]==check_expo[1] &&
- HDfabs(check_mant[0]-check_mant[1])<FP_EPSILON)
- continue;
- } /* end else */
- } /* end else */
- }
-#endif /* H5_CONVERT_DENORMAL_FLOAT */
}
if (0==fails_this_test++) {
@@ -3387,7 +3334,7 @@ test_conv_flt_1 (const char *name, int run_test, hid_t src, hid_t dst)
if (FLT_FLOAT==src_type) {
float x;
HDmemcpy(&x, &saved[j*src_size], sizeof(float));
- printf(" %29.20e\n", x);
+ printf(" %29.20e\n", (double)x);
} else if (FLT_DOUBLE==src_type) {
double x;
HDmemcpy(&x, &saved[j*src_size], sizeof(double));
@@ -3407,7 +3354,7 @@ test_conv_flt_1 (const char *name, int run_test, hid_t src, hid_t dst)
if (FLT_FLOAT==dst_type) {
float x;
HDmemcpy(&x, &buf[j*dst_size], sizeof(float));
- printf(" %29.20e\n", x);
+ printf(" %29.20e\n", (double)x);
} else if (FLT_DOUBLE==dst_type) {
double x;
HDmemcpy(&x, &buf[j*dst_size], sizeof(double));
@@ -3425,7 +3372,7 @@ test_conv_flt_1 (const char *name, int run_test, hid_t src, hid_t dst)
printf(" %02x", hw[ENDIAN(dst_size,k,dendian)]);
printf("%*s", (int)(3*MAX(0, (ssize_t)src_size-(ssize_t)dst_size)), "");
if (FLT_FLOAT==dst_type)
- printf(" %29.20e\n", hw_f);
+ printf(" %29.20e\n", (double)hw_f);
else if (FLT_DOUBLE==dst_type)
printf(" %29.20e\n", hw_d);
#if H5_SIZEOF_LONG_DOUBLE!=H5_SIZEOF_DOUBLE
@@ -3877,12 +3824,10 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
HDmemcpy(aligned, saved+j*sizeof(long long), sizeof(long long));
hw_float = (float)(*((long long*)aligned));
break;
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
case INT_ULLONG:
HDmemcpy(aligned, saved+j*sizeof(unsigned long long), sizeof(unsigned long long));
hw_float = (float)(*((unsigned long long*)aligned));
break;
-#endif /* H5_ULLONG_TO_FP_CAST_WORKS */
case FLT_FLOAT:
case FLT_DOUBLE:
case FLT_LDOUBLE:
@@ -3930,12 +3875,10 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
HDmemcpy(aligned, saved+j*sizeof(long long), sizeof(long long));
hw_double = (double)(*((long long*)aligned));
break;
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
case INT_ULLONG:
HDmemcpy(aligned, saved+j*sizeof(unsigned long long), sizeof(unsigned long long));
hw_double = (double)(*((unsigned long long*)aligned));
break;
-#endif /* H5_ULLONG_TO_FP_CAST_WORKS */
case FLT_FLOAT:
case FLT_DOUBLE:
case FLT_LDOUBLE:
@@ -3984,12 +3927,10 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
HDmemcpy(aligned, saved+j*sizeof(long long), sizeof(long long));
hw_ldouble = (long double)(*((long long*)aligned));
break;
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
case INT_ULLONG:
HDmemcpy(aligned, saved+j*sizeof(unsigned long long), sizeof(unsigned long long));
hw_ldouble = (long double)(*((unsigned long long*)aligned));
break;
-#endif /* H5_ULLONG_TO_FP_CAST_WORKS */
case FLT_FLOAT:
case FLT_DOUBLE:
case FLT_LDOUBLE:
@@ -4451,67 +4392,6 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
}
}
}
-/* On some machines (notably the SGI and Solaris 64-bit machines) unsigned long
-* values are not converted to float or double values correctly, they are
-* consistently off by the lowest bit being rounded oppositely to our
-* software conversion routines output. So, on those machines, we allow
-* the converted value to be +/- 1 from the machine's value. -QAK
-*/
-#ifndef H5_SW_ULONG_TO_FP_BOTTOM_BIT_WORKS
- if(dst_size==sizeof(unsigned)) {
- unsigned tmp_s, tmp_h;
- HDmemcpy(&tmp_s,&buf[j*dst_size],sizeof(unsigned));
- HDmemcpy(&tmp_h,&hw[0],sizeof(unsigned));
- if((tmp_s+1)==tmp_h || (tmp_s-1)==tmp_h)
- continue; /*no error*/
- } /* end if */
- else if (dst_size==sizeof(unsigned long)) {
- unsigned long tmp_s, tmp_h;
- HDmemcpy(&tmp_s,&buf[j*dst_size],sizeof(unsigned long));
- HDmemcpy(&tmp_h,&hw[0],sizeof(unsigned long));
- if((tmp_s+1)==tmp_h || (tmp_s-1)==tmp_h)
- continue; /*no error*/
- } /* end if */
- else if (dst_size==sizeof(unsigned long long)) {
- unsigned long long tmp_s, tmp_h;
- HDmemcpy(&tmp_s,&buf[j*dst_size],sizeof(unsigned long long));
- HDmemcpy(&tmp_h,&hw[0],sizeof(unsigned long long));
- if((tmp_s+1)==tmp_h || (tmp_s-1)==tmp_h)
- continue; /*no error*/
- } /* end if */
-#endif /* end H5_ULONG_FP_BOTTOM_BIT_WORKS */
-
-/* For PGI compiler on Linux, during conversion from 'float' or 'double' to
-* 'unsigned long long', round-up happens when the fraction of float-point
-* value is greater than 0.5. So we allow the converted value to be off by 1.
-*/
-#ifndef H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS
- if((src_type==FLT_FLOAT || src_type==FLT_DOUBLE) && dst_type==INT_ULLONG) {
- unsigned long long tmp_s, tmp_h;
- HDmemcpy(&tmp_s,&buf[j*dst_size],sizeof(unsigned long long));
- HDmemcpy(&tmp_h,&hw[0],sizeof(unsigned long long));
- if((tmp_s+1)==tmp_h)
- continue; /*no error*/
- }
-#endif /*end H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS*/
-
-/* For GNU compilers on FreeBSD(sleipnir), during conversion from 'unsigned long long'
-* to 'long double', the last 2 bytes of mantissa are lost. But this loss seems
-* acceptable. We allow it to go through instead of fail it. Sometimes, there's roundup
-* to the 3rd last byte of mantissa. So we only try to compare all but the last 3 bytes.
-*/
-#ifndef H5_ULLONG_TO_LDOUBLE_PRECISION
-#if H5_SIZEOF_LONG_DOUBLE !=0
- if(src_type==INT_ULLONG && dst_type==FLT_LDOUBLE) {
- long double tmp_s, tmp_h;
- HDmemcpy(&tmp_s,&buf[j*dst_size],sizeof(long double));
- HDmemcpy(&tmp_h,&hw[0],sizeof(long double));
- /*Don't compare the last 3 bytes of mantissa*/
- if(!HDmemcmp(&tmp_s+4, &tmp_h+4, sizeof(long double)-4))
- continue; /*no error*/
- }
-#endif
-#endif /*end H5_ULLONG_TO_LDOUBLE_PRECISION*/
/* Print errors */
if (0==fails_this_test++) {
@@ -4570,7 +4450,7 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
break;
case FLT_FLOAT:
HDmemcpy(aligned, saved+j*sizeof(float), sizeof(float));
- printf(" %29f\n", *((float*)aligned));
+ printf(" %29f\n", (double)*((float*)aligned));
break;
case FLT_DOUBLE:
HDmemcpy(aligned, saved+j*sizeof(double), sizeof(double));
@@ -4635,7 +4515,7 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
break;
case FLT_FLOAT:
HDmemcpy(aligned, buf+j*sizeof(float), sizeof(float));
- printf(" %29f\n", *((float*)aligned));
+ printf(" %29f\n", (double)*((float*)aligned));
break;
case FLT_DOUBLE:
HDmemcpy(aligned, buf+j*sizeof(double), sizeof(double));
@@ -4689,7 +4569,7 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
printf(" %29"H5_PRINTF_LL_WIDTH"u\n", *((unsigned long long*)hw));
break;
case FLT_FLOAT:
- printf(" %29f\n", *((float*)hw));
+ printf(" %29f\n", (double)*((float*)hw));
break;
case FLT_DOUBLE:
printf(" %29f\n", *((double*)hw));
@@ -5112,9 +4992,7 @@ run_int_fp_conv(const char *name)
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LONG, H5T_NATIVE_FLOAT);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LONG, H5T_NATIVE_DOUBLE);
-#if H5_ULONG_TO_FLOAT_ACCURATE
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULONG, H5T_NATIVE_FLOAT);
-#endif
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULONG, H5T_NATIVE_DOUBLE);
#endif
@@ -5122,29 +5000,10 @@ run_int_fp_conv(const char *name)
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LLONG, H5T_NATIVE_FLOAT);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LLONG, H5T_NATIVE_DOUBLE);
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULLONG, H5T_NATIVE_FLOAT);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULLONG, H5T_NATIVE_DOUBLE);
-#else /* H5_ULLONG_TO_FP_CAST_WORKS */
- {
- char str[256]; /*hello string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "unsigned long long", "float");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to compiler not handling conversion.");
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "unsigned long long", "double");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to compiler not handling conversion.");
- }
-#endif /* H5_ULLONG_TO_FP_CAST_WORKS */
#endif
-#if H5_INTEGER_TO_LDOUBLE_ACCURATE
#if H5_SIZEOF_LONG_DOUBLE!=H5_SIZEOF_DOUBLE
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_SCHAR, H5T_NATIVE_LDOUBLE);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_UCHAR, H5T_NATIVE_LDOUBLE);
@@ -5173,51 +5032,11 @@ run_int_fp_conv(const char *name)
#endif
#endif /* H5_SIZEOF_LONG!=H5_SIZEOF_INT */
#if H5_SIZEOF_LONG_LONG!=H5_SIZEOF_LONG
-#if H5_LLONG_TO_LDOUBLE_CORRECT
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LLONG, H5T_NATIVE_LDOUBLE);
-#else /* H5_LLONG_TO_LDOUBLE_CORRECT */
- {
- char str[256]; /*hello string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "long long", "long double");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to compiler error in handling conversion.");
- }
-#endif /* H5_LLONG_TO_LDOUBLE_CORRECT */
-#if H5_ULLONG_TO_FP_CAST_WORKS && H5_ULLONG_TO_LDOUBLE_PRECISION && H5_LLONG_TO_LDOUBLE_CORRECT
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULLONG, H5T_NATIVE_LDOUBLE);
-#else /* H5_ULLONG_TO_FP_CAST_WORKS && H5_ULLONG_TO_LDOUBLE_PRECISION && H5_LLONG_TO_LDOUBLE_CORRECT */
- {
- char str[256]; /*hello string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "unsigned long long", "long double");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to compiler not handling conversion.");
- }
-#endif /* H5_ULLONG_TO_FP_CAST_WORKS && H5_ULLONG_TO_LDOUBLE_PRECISION && H5_LLONG_TO_LDOUBLE_CORRECT */
-#endif
#endif
-#else /*H5_INTEGER_TO_LDOUBLE_ACCURATE*/
- {
- char str[256]; /*string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "all integers", "long double");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE !=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disabled long double.");
#endif
- }
-#endif /*H5_INTEGER_TO_LDOUBLE_ACCURATE*/
-
return nerrors;
}
@@ -5240,7 +5059,6 @@ static int
run_fp_int_conv(const char *name)
{
int nerrors = 0;
-#ifdef H5_FP_TO_INTEGER_OVERFLOW_WORKS
int test_values;
#ifdef H5_VMS
@@ -5277,62 +5095,23 @@ run_fp_int_conv(const char *name)
#if H5_SIZEOF_LONG_LONG!=H5_SIZEOF_LONG
if(!strcmp(name, "hw")) { /* Hardware conversion */
- /* Windows .NET 2003 doesn't work for hardware conversion of this case.
- * .NET should define this macro H5_HW_FP_TO_LLONG_NOT_WORKS. */
-#ifndef H5_HW_FP_TO_LLONG_NOT_WORKS
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_FLOAT, H5T_NATIVE_LLONG);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_DOUBLE, H5T_NATIVE_LLONG);
-#endif /*H5_HW_FP_TO_LLONG_NOT_WORKS*/
} else { /* Software conversion */
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_FLOAT, H5T_NATIVE_LLONG);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_DOUBLE, H5T_NATIVE_LLONG);
}
-#ifdef H5_FP_TO_ULLONG_RIGHT_MAXIMUM
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_FLOAT, H5T_NATIVE_ULLONG);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_DOUBLE, H5T_NATIVE_ULLONG);
-#else /*H5_FP_TO_ULLONG_RIGHT_MAXIMUM*/
- {
- char str[256]; /*hello string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "float", "unsigned long long");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to hardware conversion error.");
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "double", "unsigned long long");
- printf("%-70s", str);
- SKIPPED();
- HDputs(" Test skipped due to hardware conversion error.");
- }
-#endif /*H5_FP_TO_ULLONG_RIGHT_MAXIMUM*/
#endif
-#if H5_LDOUBLE_TO_INTEGER_WORKS && H5_LDOUBLE_TO_INTEGER_ACCURATE
#if H5_SIZEOF_LONG_DOUBLE!=H5_SIZEOF_DOUBLE
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_SCHAR);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_UCHAR);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_SHORT);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_USHORT);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_INT);
-#if H5_LDOUBLE_TO_UINT_ACCURATE
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_UINT);
-#else /*H5_LDOUBLE_TO_UINT_ACCURATE*/
- {
- char str[256]; /*string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "long double", "unsigned int");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE!=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disabled long double.");
-#endif
- }
-#endif /*H5_LDOUBLE_TO_UINT_ACCURATE*/
#if H5_SIZEOF_LONG!=H5_SIZEOF_INT && H5_SIZEOF_LONG_DOUBLE!=0
#ifndef H5_LDOUBLE_TO_LONG_SPECIAL
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_LONG);
@@ -5355,77 +5134,13 @@ run_fp_int_conv(const char *name)
#endif /*H5_SIZEOF_LONG!=H5_SIZEOF_INT && H5_SIZEOF_LONG_DOUBLE!=0 */
#if H5_SIZEOF_LONG_LONG!=H5_SIZEOF_LONG && H5_SIZEOF_LONG_DOUBLE!=0
-#ifdef H5_LDOUBLE_TO_LLONG_ACCURATE
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_LLONG);
-#else /*H5_LDOUBLE_TO_LLONG_ACCURATE*/
- {
- char str[256]; /*string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "long double", "long long");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE!=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disabled long double.");
-#endif
- }
-#endif /*H5_LDOUBLE_TO_LLONG_ACCURATE*/
-#if defined(H5_FP_TO_ULLONG_RIGHT_MAXIMUM) && defined(H5_LDOUBLE_TO_LLONG_ACCURATE)
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_ULLONG);
-#else /*H5_FP_TO_ULLONG_RIGHT_MAXIMUM && H5_LDOUBLE_TO_LLONG_ACCURATE*/
- {
- char str[256]; /*string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "long double", "unsigned long long");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE!=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disabled long double.");
#endif
- }
-#endif /*H5_FP_TO_ULLONG_RIGHT_MAXIMUM && H5_LDOUBLE_TO_LLONG_ACCURATE*/
-#endif
-#endif
-#else /*H5_LDOUBLE_TO_INTEGER_WORKS && H5_LDOUBLE_TO_INTEGER_ACCURATE*/
- {
- char str[256]; /*hello string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "long double", "all integers");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE!=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disabled long double.");
#endif
- }
-#endif /*H5_LDOUBLE_TO_INTEGER_WORKS && H5_LDOUBLE_TO_INTEGER_ACCURATE*/
#ifndef H5_VMS
} /* end for */
#endif /* H5_VMS */
-#else /* H5_FP_TO_INTEGER_OVERFLOW_WORKS */
-/* For Cray X1, the compiler generates floating exception when the
- * conversion overflows. So disable all of the conversions from
- * floating-point numbers to integers.
- */
- char str[256]; /*string */
-
- HDsnprintf(str, sizeof(str), "Testing %s %s -> %s conversions",
- name, "all floating-point numbers", "all integers");
- printf("%-70s", str);
- SKIPPED();
-#if H5_SIZEOF_LONG_DOUBLE!=0
- HDputs(" Test skipped due to hardware conversion error.");
-#else
- HDputs(" Test skipped due to disbaled long double.");
-#endif
-#endif /* H5_FP_TO_INTEGER_OVERFLOW_WORKS */
return nerrors;
}
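Several dt_arith.c hunks cast a float value to double before handing it to printf. A short sketch of why this is cosmetic, relying only on the C default argument promotions: a float passed through a variadic call already arrives as a double, so the explicit cast does not change the output, it just documents the promotion and quiets -Wdouble-promotion style warnings (illustrative only, not from the patch):

    /* Illustrative only -- not from the patch. */
    #include <stdio.h>

    int main(void)
    {
        float x = 1.0f / 3.0f;

        printf("implicit promotion: %29.20e\n", x);          /* promoted to double */
        printf("explicit cast:      %29.20e\n", (double)x);  /* identical output   */

        return 0;
    }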
diff --git a/test/dtransform.c b/test/dtransform.c
index 9c78043..4adbaf5 100644
--- a/test/dtransform.c
+++ b/test/dtransform.c
@@ -17,7 +17,7 @@
#define ROWS 12
#define COLS 18
-#define FLOAT_TOL 0.0001
+#define FLOAT_TOL 0.0001F
static int init_test(hid_t file_id);
static int test_copy(const hid_t dxpl_id_c_to_f_copy, const hid_t dxpl_id_polynomial_copy);
@@ -297,20 +297,8 @@ int main(void)
TEST_TYPE_CONTIG(dxpl_id_utrans_inv, unsigned int, H5T_NATIVE_UINT, "uint", transformData, 0);
TEST_TYPE_CONTIG(dxpl_id_c_to_f, long, H5T_NATIVE_LONG, "long", windchillFfloat, 1);
TEST_TYPE_CONTIG(dxpl_id_utrans_inv, unsigned long, H5T_NATIVE_ULONG, "ulong", transformData, 0);
-
-#ifdef H5_LLONG_TO_FP_CAST_WORKS
TEST_TYPE_CONTIG(dxpl_id_c_to_f, long long, H5T_NATIVE_LLONG, "llong", windchillFfloat, 1);
-#else
- TESTING("contiguous, with type conversion (float->llong)")
- SKIPPED()
-#endif
-
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
TEST_TYPE_CONTIG(dxpl_id_utrans_inv, unsigned long long, H5T_NATIVE_ULLONG, "ullong", transformData, 0);
-#else
- TESTING("contiguous, with type conversion (float->ullong)")
- SKIPPED()
-#endif
TEST_TYPE_CONTIG(dxpl_id_c_to_f, float, H5T_NATIVE_FLOAT, "float", windchillFfloat, 1);
TEST_TYPE_CONTIG(dxpl_id_c_to_f, double, H5T_NATIVE_DOUBLE, "double", windchillFfloat, 1);
#if H5_SIZEOF_LONG_DOUBLE!=0
@@ -326,19 +314,8 @@ int main(void)
TEST_TYPE_CHUNK(dxpl_id_utrans_inv, unsigned int, H5T_NATIVE_UINT, "uint", transformData, 0);
TEST_TYPE_CHUNK(dxpl_id_c_to_f, long, H5T_NATIVE_LONG, "long", windchillFfloat, 1);
TEST_TYPE_CHUNK(dxpl_id_utrans_inv, unsigned long, H5T_NATIVE_ULONG, "ulong", transformData, 0);
-#ifdef H5_LLONG_TO_FP_CAST_WORKS
TEST_TYPE_CHUNK(dxpl_id_c_to_f, long long, H5T_NATIVE_LLONG, "llong", windchillFfloat, 1);
-#else
- TESTING("chunked, with type conversion (float->llong)")
- SKIPPED()
-#endif
-
-#ifdef H5_ULLONG_TO_FP_CAST_WORKS
TEST_TYPE_CHUNK(dxpl_id_utrans_inv, unsigned long long, H5T_NATIVE_ULLONG, "ullong", transformData, 0);
-#else
- TESTING("chunked, with type conversion (float->ullong)")
- SKIPPED()
-#endif
TEST_TYPE_CHUNK(dxpl_id_c_to_f, float, H5T_NATIVE_FLOAT, "float", windchillFfloat, 1);
TEST_TYPE_CHUNK(dxpl_id_c_to_f, double, H5T_NATIVE_DOUBLE, "double", windchillFfloat, 1);
#if H5_SIZEOF_LONG_DOUBLE!=0
diff --git a/test/fheap.c b/test/fheap.c
index 96b0673..6bcd8bf 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -16061,9 +16061,9 @@ test_write(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
/* Change size of data to write */
if(u < 20)
- obj_size = (size_t)(obj_size * 1.3);
+ obj_size = (size_t)(obj_size * 1.3F);
else
- obj_size = (size_t)(obj_size / 1.3);
+ obj_size = (size_t)(obj_size / 1.3F);
} /* end for */
/* Close the fractal heap */
@@ -16110,9 +16110,9 @@ test_write(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
/* Change size of data to write */
if(u < 20)
- obj_size = (size_t)(obj_size * 1.3);
+ obj_size = (size_t)(obj_size * 1.3F);
else
- obj_size = (size_t)(obj_size / 1.3);
+ obj_size = (size_t)(obj_size / 1.3F);
} /* end for */
/* Close the fractal heap */
diff --git a/test/fillval.c b/test/fillval.c
index 459620d..e7ad521 100644
--- a/test/fillval.c
+++ b/test/fillval.c
@@ -627,7 +627,7 @@ test_create(hid_t fapl, const char *base_name, H5D_layout_t layout)
H5_FAILED();
puts(" Got wrong fill value");
printf(" Got rd_c.a=%f, rd_c.y=%f and rd_c.x=%d, rd_c.z=%c\n",
- rd_c.a, rd_c.y, rd_c.x, rd_c.z);
+ (double)rd_c.a, rd_c.y, rd_c.x, rd_c.z);
}
if(H5Dclose(dset9) < 0) goto error;
if(H5Pclose(dcpl) < 0) goto error;
@@ -700,7 +700,7 @@ test_create(hid_t fapl, const char *base_name, H5D_layout_t layout)
H5_FAILED();
puts(" Got wrong fill value");
printf(" Got rd_c.a=%f, rd_c.y=%f and rd_c.x=%d, rd_c.z=%c\n",
- rd_c.a, rd_c.y, rd_c.x, rd_c.z);
+ (double)rd_c.a, rd_c.y, rd_c.x, rd_c.z);
}
if(H5Dclose(dset8) < 0) goto error;
if(H5Pclose(dcpl) < 0) goto error;
@@ -820,8 +820,8 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval,
"Fill value: %f, %d, %f, %c\n",
hs_offset[0], hs_offset[1],
hs_offset[2], hs_offset[3],
- hs_offset[4], rd_c.a, rd_c.x, rd_c.y, rd_c.z,
- fill_c.a, fill_c.x, fill_c.y, fill_c.z);
+ hs_offset[4], (double)rd_c.a, rd_c.x, rd_c.y, rd_c.z,
+ (double)fill_c.a, fill_c.x, fill_c.y, fill_c.z);
goto error;
}
}
@@ -888,8 +888,8 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval,
hs_offset[0], hs_offset[1],
hs_offset[2], hs_offset[3],
hs_offset[4],
- buf_c[u].a, buf_c[u].x, buf_c[u].y, buf_c[u].z,
- fill_c.a, fill_c.x, fill_c.y, fill_c.z);
+ (double)buf_c[u].a, buf_c[u].x, buf_c[u].y, buf_c[u].z,
+ (double)fill_c.a, fill_c.x, fill_c.y, fill_c.z);
goto error;
} /* end if */
} /* end for */
@@ -909,9 +909,9 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval,
else if(datatype == H5T_COMPOUND) {
HDmemset(buf_c, 0, ((size_t)nelmts * sizeof(comp_datatype)));
for(u = 0; u < nelmts; u++) {
- buf_c[u].a = (float)1111.11;
+ buf_c[u].a = 1111.11F;
buf_c[u].x = 2222;
- buf_c[u].y = 3333.3333;
+ buf_c[u].y = 3333.3333F;
buf_c[u].z = 'd';
}
if(H5Dwrite(dset2, ctype_id, mspace, fspace, H5P_DEFAULT, buf_c) < 0)
@@ -1002,7 +1002,7 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval,
(long)hs_offset[0], (long)hs_offset[1],
(long)hs_offset[2], (long)hs_offset[3],
(long)hs_offset[4],
- rd_c.a, rd_c.x, rd_c.y, rd_c.z, should_be_c.a,
+ (double)rd_c.a, rd_c.x, rd_c.y, rd_c.z, (double)should_be_c.a,
should_be_c.x,should_be_c.y,should_be_c.z);
goto error;
}
@@ -1021,7 +1021,7 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval,
(long)hs_offset[0], (long)hs_offset[1],
(long)hs_offset[2], (long)hs_offset[3],
(long)hs_offset[4],
- rd_c.a, rd_c.x, rd_c.y, rd_c.z, should_be_c.a,
+ (double)rd_c.a, rd_c.x, rd_c.y, rd_c.z, (double)should_be_c.a,
should_be_c.x,should_be_c.y,should_be_c.z);
goto error;
}
@@ -1144,7 +1144,7 @@ test_rdwr(hid_t fapl, const char *base_name, H5D_layout_t layout)
* as compound type */
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) goto error;
HDmemset(&fill_ctype, 0, sizeof(fill_ctype));
- fill_ctype.y = 4444.4444;
+ fill_ctype.y = 4444.4444F;
if(H5Pset_fill_value(dcpl, ctype_id, &fill_ctype) < 0) goto error;
nerrors += test_rdwr_cases(file, dcpl, "dset11", &fill_ctype, H5D_FILL_TIME_ALLOC,
layout, H5T_COMPOUND, ctype_id);
@@ -1197,7 +1197,7 @@ test_rdwr(hid_t fapl, const char *base_name, H5D_layout_t layout)
* as compound type */
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) goto error;
HDmemset(&fill_ctype, 0, sizeof(fill_ctype));
- fill_ctype.y = 4444.4444;
+ fill_ctype.y = 4444.4444F;
if(H5Pset_fill_value(dcpl, ctype_id, &fill_ctype) < 0) goto error;
nerrors += test_rdwr_cases(file, dcpl, "dset12", &fill_ctype, H5D_FILL_TIME_ALLOC,
layout, H5T_COMPOUND, ctype_id);
diff --git a/test/flush2.c b/test/flush2.c
index 06dd086..28ce41e 100644
--- a/test/flush2.c
+++ b/test/flush2.c
@@ -73,13 +73,13 @@ check_dset(hid_t file, const char* name)
* 1998-11-06 ptl
*/
error = fabs(the_data[i][j] - (double)(hssize_t)i / ((hssize_t)j + 1));
- if(error > 0.0001) {
- H5_FAILED();
- printf(" dset[%lu][%lu] = %g\n",
- (unsigned long)i, (unsigned long)j, the_data[i][j]);
- printf(" should be %g\n",
- (double)(hssize_t)i/(hssize_t)(j+1));
- goto error;
+ if(error > 0.0001F) {
+ H5_FAILED();
+ printf(" dset[%lu][%lu] = %g\n",
+ (unsigned long)i, (unsigned long)j, the_data[i][j]);
+ printf(" should be %g\n",
+ (double)(hssize_t)i/(hssize_t)(j+1));
+ goto error;
}
}
if(H5Dclose(dset) < 0) goto error;
@@ -186,7 +186,7 @@ main(void)
PASSED()
else
{
-#if defined H5_HAVE_WIN32_API && defined _HDF5USEDLL_
+#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
SKIPPED();
puts(" DLL will flush the file even when calling _exit, skip this test temporarily");
#elif defined H5_VMS
@@ -209,7 +209,7 @@ main(void)
PASSED()
else
{
-#if defined H5_HAVE_WIN32_API && defined _HDF5USEDLL_
+#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
SKIPPED();
puts(" DLL will flush the file even when calling _exit, skip this test temporarily");
#elif defined H5_VMS
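flush2.c switches the Windows DLL check from the hand-maintained _HDF5USEDLL_ macro to !defined(hdf5_EXPORTS). The new form assumes CMake's convention of defining <target>_EXPORTS while it compiles a shared library target, so the symbol's absence marks a consumer of the DLL rather than the DLL build itself. A minimal sketch of the pattern, using plain _WIN32 instead of the library's own H5_HAVE_WIN32_API macro (illustrative only):

    /* Illustrative only -- not from the patch.  hdf5_EXPORTS is assumed to be
     * the CMake-generated export symbol for the shared hdf5 target. */
    #include <stdio.h>

    #if defined(_WIN32) && !defined(hdf5_EXPORTS)
    #  define USING_HDF5_DLL 1
    #else
    #  define USING_HDF5_DLL 0
    #endif

    int main(void)
    {
        printf("built as a DLL consumer: %s\n", USING_HDF5_DLL ? "yes" : "no");
        return 0;
    }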
diff --git a/test/gen_cross.c b/test/gen_cross.c
index 3b0a56f..1c73016 100644
--- a/test/gen_cross.c
+++ b/test/gen_cross.c
@@ -175,7 +175,6 @@ create_normal_dset(hid_t fid, hid_t fsid, hid_t msid)
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -183,7 +182,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -205,7 +203,6 @@ error:
int
create_scale_offset_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
float data[NX][NY]; /* data to write */
@@ -267,15 +264,8 @@ create_scale_offset_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -283,7 +273,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -305,7 +294,6 @@ error:
int
create_scale_offset_dsets_double(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
double data[NX][NY]; /* data to write */
@@ -367,15 +355,8 @@ create_scale_offset_dsets_double(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -383,7 +364,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -405,7 +385,6 @@ error:
int
create_scale_offset_dsets_char(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
char data[NX][NY]; /* data to write */
@@ -475,15 +454,8 @@ create_scale_offset_dsets_char(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -491,7 +463,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -513,7 +484,6 @@ error:
int
create_scale_offset_dsets_short(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
short data[NX][NY]; /* data to write */
@@ -583,15 +553,8 @@ create_scale_offset_dsets_short(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -599,7 +562,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -621,7 +583,6 @@ error:
int
create_scale_offset_dsets_int(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
int data[NX][NY]; /* data to write */
@@ -691,15 +652,8 @@ create_scale_offset_dsets_int(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -707,7 +661,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -730,7 +683,6 @@ error:
int
create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
hid_t dataset; /* dataset handles */
hid_t dcpl;
long long data[NX][NY]; /* data to write */
@@ -800,15 +752,8 @@ create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SCALEOFFSET */
- const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
-
return 0;
-#ifdef H5_HAVE_FILTER_SCALEOFFSET
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -816,7 +761,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SCALEOFFSET */
}
@@ -838,7 +782,6 @@ error:
int
create_fletcher_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_FLETCHER32
hid_t dataset; /* dataset handles */
hid_t dcpl;
float data[NX][NY]; /* data to write */
@@ -900,15 +843,8 @@ create_fletcher_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_FLETCHER32 */
- const char *not_supported= "Fletcher filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
-
return 0;
-#ifdef H5_HAVE_FILTER_FLETCHER32
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -916,7 +852,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
}
@@ -1131,7 +1066,6 @@ error:
int
create_shuffle_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_SHUFFLE
hid_t dataset; /* dataset handles */
hid_t dcpl;
float data[NX][NY]; /* data to write */
@@ -1193,15 +1127,8 @@ create_shuffle_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_SHUFFLE */
- const char *not_supported= "Shuffle filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_SHUFFLE */
-
return 0;
-#ifdef H5_HAVE_FILTER_SHUFFLE
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -1209,7 +1136,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_SHUFFLE */
}
@@ -1231,7 +1157,6 @@ error:
int
create_nbit_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
-#ifdef H5_HAVE_FILTER_NBIT
hid_t dataset; /* dataset handles */
hid_t datatype;
hid_t dcpl;
@@ -1314,15 +1239,8 @@ create_nbit_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
if(H5Pclose(dcpl) < 0)
TEST_ERROR
-#else /* H5_HAVE_FILTER_NBIT */
- const char *not_supported= "Nbit filter is not enabled. Can't create the dataset.";
-
- puts(not_supported);
-#endif /* H5_HAVE_FILTER_NBIT */
-
return 0;
-#ifdef H5_HAVE_FILTER_NBIT
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -1330,7 +1248,6 @@ error:
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_NBIT */
}
@@ -1451,3 +1368,4 @@ main (void)
return 0;
}
+
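gen_cross.c (like dsets.c and gen_filters.c above) drops every #ifdef guard for the internal fletcher32, shuffle, nbit, and scale-offset filters, consistent with those filters always being compiled in; only external filters such as deflate and szip stay conditional, e.g. the H5_HAVE_FILTER_DEFLATE check kept in test_filter_delete. A program that still wants to confirm a filter is present can ask the library at run time, as in this sketch (not part of the patch):

    /* Illustrative only -- not from the patch. */
    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        htri_t avail;

        avail = H5Zfilter_avail(H5Z_FILTER_SCALEOFFSET);
        printf("scale-offset filter available: %s\n", avail > 0 ? "yes" : "no");

        avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);   /* still build-dependent (zlib) */
        printf("deflate filter available:      %s\n", avail > 0 ? "yes" : "no");

        return 0;
    }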
diff --git a/test/gen_filespace.c b/test/gen_filespace.c
index 7ee2a7a..e0c42e8 100644
--- a/test/gen_filespace.c
+++ b/test/gen_filespace.c
@@ -42,7 +42,8 @@ static void gen_file(void)
hid_t dataset, space;
hsize_t dim[1];
int data[NUM_ELMTS];
- unsigned i, j; /* Local index variable */
+ size_t j; /* Local index variable */
+ int i; /* Local index variable */
H5F_file_space_type_t fs_type; /* File space handling strategy */
for(j = 0, fs_type = H5F_FILE_SPACE_ALL_PERSIST; j < NELMTS(FILENAMES); j++, fs_type = (H5F_file_space_type_t)(fs_type + 1)) {
diff --git a/test/gen_filters.c b/test/gen_filters.c
index 58400d5..b44339d 100644
--- a/test/gen_filters.c
+++ b/test/gen_filters.c
@@ -48,7 +48,6 @@ static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
static herr_t
test_filters_endianess(void)
{
-#if defined H5_HAVE_FILTER_FLETCHER32
hid_t fid = -1; /* file ID */
hid_t dsid = -1; /* dataset ID */
hid_t sid = -1; /* dataspace ID */
@@ -85,10 +84,8 @@ test_filters_endianess(void)
if(H5Sclose(sid) < 0) goto error;
if(H5Fclose(fid) < 0) goto error;
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
return 0;
-#if defined H5_HAVE_FILTER_FLETCHER32
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
@@ -97,7 +94,6 @@ error:
H5Fclose(fid);
} H5E_END_TRY;
return -1;
-#endif /* H5_HAVE_FILTER_FLETCHER32 */
} /* end test_filters_endianess() */
/* This message derives from H5Z */
diff --git a/test/gen_plist.c b/test/gen_plist.c
index acc5f3e..e77af73 100644
--- a/test/gen_plist.c
+++ b/test/gen_plist.c
@@ -280,8 +280,8 @@ main(void)
assert(ret > 0);
/* Create FAPL for the elink FAPL */
- if((fapl1 = ret = H5Pcreate(ret = H5P_FILE_ACCESS)) < 0)
- assert(ret > 0);
+ if((fapl1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ assert(fapl1 > 0);
if((ret = H5Pset_alignment(fapl1, 2, 1024)) < 0)
assert(ret > 0);
diff --git a/test/getname.c b/test/getname.c
index 2d999b9..dc1ddbe 100644
--- a/test/getname.c
+++ b/test/getname.c
@@ -103,7 +103,11 @@ test_main(hid_t file_id, hid_t fapl)
hid_t space_id;
hid_t type_id, type2_id;
hsize_t dims[1] = { 5 };
- size_t name_len; /* Name length */
+ size_t name_len; /* Name length */
+ H5O_info_t oinfo; /* Object info structs */
+ hid_t dtype; /* Object identifier for testing */
+ hid_t dtype_anon; /* Object identifier for testing anonymous */
+ ssize_t size; /* Size returned by H5Iget_name */
/* Initialize the file names */
h5_fixname(FILENAME[1], fapl, filename1, sizeof filename1);
@@ -2355,7 +2359,58 @@ test_main(hid_t file_id, hid_t fapl)
H5Gclose(group_id);
H5Gclose(group2_id);
H5Fclose(file1_id);
- H5Fclose(file2_id);
+
+ PASSED();
+
+ /*-------------------------------------------------------------------------
+ * Test H5Iget_name with anonymous datatypes
+ *-------------------------------------------------------------------------
+ */
+
+ TESTING("H5Iget_name with anonymous datatypes");
+
+ /* Commit the type anonymously and link it in */
+ if((dtype = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR
+ /* Test H5Iget_name with created datatype, should fail because not committed */
+ H5E_BEGIN_TRY {
+ if((size = H5Iget_name(dtype, NULL, 0)) >= 0) TEST_ERROR
+ } H5E_END_TRY;
+
+ if(H5Tcommit_anon(file2_id, dtype, H5P_DEFAULT, H5P_DEFAULT)) TEST_ERROR
+
+ /* Test H5Iget_name with anonymously created datatype, should pass because committed */
+ if((size = H5Iget_name(dtype, NULL, 0)) != 0) TEST_ERROR
+
+ /* Create a link to the object */
+ if( H5Olink(dtype, file2_id, "datatype", H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+    /* Create a second datatype and commit it anonymously, leaving no links to it */
+ if((dtype_anon = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR
+ if(H5Tcommit_anon(file2_id, dtype_anon, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ /* Test H5Iget_name with anonymously created datatype, should pass because committed */
+ if((size = H5Iget_name(dtype_anon, NULL,0)) != 0) TEST_ERROR
+
+ /* Store the address of the datatype for later use */
+ if(H5Oget_info(dtype_anon, &oinfo) < 0) TEST_ERROR
+
+ /* Update the reference count to dtype_anon to preserve the datatype */
+ if(H5Oincr_refcount(dtype_anon) < 0) TEST_ERROR
+
+ if(H5Tclose(dtype) < 0) TEST_ERROR
+ if(H5Tclose(dtype_anon) < 0) TEST_ERROR
+ if(H5Fclose(file2_id) < 0) TEST_ERROR
+
+ /* Re-open the file and check that the anonymous datatypes persist */
+ if( (file2_id = H5Fopen(filename2, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR
+
+    /* Check that H5Iget_name does not return an error for anonymously committed datatypes */
+ if((dtype_anon = H5Oopen_by_addr(file2_id, oinfo.addr)) < 0) TEST_ERROR
+
+ if((size = H5Iget_name(dtype_anon,NULL,0)) != 0) TEST_ERROR
+
+ if(H5Tclose(dtype_anon) < 0) TEST_ERROR
+ if(H5Fclose(file2_id) < 0) TEST_ERROR
PASSED();
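The new getname.c block exercises H5Iget_name on datatypes committed without a link. Stripped of the test scaffolding, the pattern it relies on is that an anonymously committed datatype reports a zero-length name, and it only survives the file being closed if it is linked in with H5Olink or kept alive with H5Oincr_refcount. A compact sketch (illustrative only, error checks omitted):

    /* Illustrative only -- not the test code itself. */
    #include "hdf5.h"

    int main(void)
    {
        hid_t file  = H5Fcreate("anon_dtype.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t dtype = H5Tcopy(H5T_NATIVE_INT);

        /* Committed but unnamed: H5Iget_name() reports a zero-length name. */
        H5Tcommit_anon(file, dtype, H5P_DEFAULT, H5P_DEFAULT);

        /* Give the anonymous datatype a name so it is reachable after reopen. */
        H5Olink(dtype, file, "datatype", H5P_DEFAULT, H5P_DEFAULT);

        H5Tclose(dtype);
        H5Fclose(file);
        return 0;
    }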
diff --git a/test/hyperslab.c b/test/hyperslab.c
index 5e2c109..9bc085c 100644
--- a/test/hyperslab.c
+++ b/test/hyperslab.c
@@ -609,10 +609,10 @@ test_multifill(size_t nx)
for(i = 0; i < nx; i++) {
src[i].left = 1111111;
- src[i].mid = 12345.6789;
+ src[i].mid = 12345.6789F;
src[i].right = 2222222;
dst[i].left = 3333333;
- dst[i].mid = 98765.4321;
+ dst[i].mid = 98765.4321F;
dst[i].right = 4444444;
} /* end for */
@@ -621,7 +621,7 @@ test_multifill(size_t nx)
* over and over again.
*/
fill.left = 55555555;
- fill.mid = 3.1415927;
+ fill.mid = 3.1415927F;
fill.right = 66666666;
src_stride = 0;
diff --git a/test/links.c b/test/links.c
index 6119bb3..891b5e4 100644
--- a/test/links.c
+++ b/test/links.c
@@ -141,7 +141,7 @@ const char *FILENAME[] = {
#define H5L_DIM1 100
#define H5L_DIM2 100
-#define FILTER_FILESIZE_MAX_FRACTION .9
+#define FILTER_FILESIZE_MAX_FRACTION 0.9F
/* Creation order macros */
#define CORDER_GROUP_NAME "corder_group"
diff --git a/test/mtime.c b/test/mtime.c
index 6e00fe0..0c0c923 100644
--- a/test/mtime.c
+++ b/test/mtime.c
@@ -114,7 +114,7 @@ main(void)
     puts("    Modification times will be maintained in the file but");
puts(" cannot be queried on this system. See H5O_mtime_decode().");
return 0;
- } else if(HDfabs(HDdifftime(now, oi1.ctime)) > 60.0) {
+ } else if(HDfabs(HDdifftime(now, oi1.ctime)) > 60.0F) {
H5_FAILED();
tm = HDlocaltime(&(oi1.ctime));
HDstrftime((char*)buf1, sizeof buf1, "%Y-%m-%d %H:%M:%S", tm);
diff --git a/test/objcopy.c b/test/objcopy.c
index 0a1d7a0..71a7236 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -2315,7 +2315,7 @@ test_copy_dataset_compound(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
#endif /* H5_CLEAR_MEMORY */
for(i = 0; i < DIM_SIZE_1; i++) {
buf[i].a = i;
- buf[i].d = 1. / (i + 1);
+ buf[i].d = 1.0F / (i + 1);
} /* end for */
/* Initialize the filenames */
@@ -2447,9 +2447,9 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf1d[i] = (float)(i / 2.0);
+ buf1d[i] = (float)(i / 2.0F);
for(j = 0; j < DIM_SIZE_2; j++)
- buf2d[i][j] = (float)(i + (j / 100.0));
+ buf2d[i][j] = (float)(i + (j / 100.0F));
} /* end for */
/* Initialize the filenames */
@@ -2790,9 +2790,9 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf1d[i] = (float)(i / 10.0);
+ buf1d[i] = (float)(i / 10.0F);
for(j = 0; j < DIM_SIZE_2; j++)
- buf2d[i][j] = (float)(i + (j / 100.0));
+ buf2d[i][j] = (float)(i + (j / 100.0F));
} /* end for */
/* Initialize the filenames */
@@ -2985,7 +2985,7 @@ test_copy_dataset_compressed(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* set initial data values */
for (i=0; i<DIM_SIZE_1; i++)
for (j=0; j<DIM_SIZE_2; j++)
- buf[i][j] = (float)(100.0); /* Something easy to compress */
+ buf[i][j] = 100.0F; /* Something easy to compress */
/* Initialize the filenames */
h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename);
@@ -3114,7 +3114,7 @@ test_copy_dataset_compact(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* set initial data values */
for (i=0; i<DIM_SIZE_1; i++)
for (j=0; j<DIM_SIZE_2; j++)
- buf[i][j] = (float)(i+j/100.0);
+ buf[i][j] = (float)(i+j/100.0F);
/* Initialize the filenames */
h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename);
@@ -7591,7 +7591,7 @@ test_copy_dataset_contig_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1. / (i + 1.);
+ buf[i].c = 1.0F / (i + 1.0F);
} /* end for */
/* Initialize the filenames */
@@ -7730,7 +7730,7 @@ test_copy_dataset_chunked_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1. / (i + 1.);
+ buf[i].c = 1.0F / (i + 1.0F);
} /* end for */
/* Initialize the filenames */
@@ -7875,7 +7875,7 @@ test_copy_dataset_compact_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1. / (i + 1.);
+ buf[i].c = 1.0F / (i + 1.0F);
} /* end for */
/* Initialize the filenames */
diff --git a/test/plugin.c b/test/plugin.c
index 86bc952..74a8f4b 100644
--- a/test/plugin.c
+++ b/test/plugin.c
@@ -456,14 +456,14 @@ error:
}
/*-------------------------------------------------------------------------
- * Function: test_read_with_filters
+ * Function: test_read_with_filters
*
- * Purpose: Tests reading dataset created with dynamically loaded filters
+ * Purpose: Tests reading dataset created with dynamically loaded filters
*
- * Return: Success: 0
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
- * Programmer: Raymond Lu
+ * Programmer: Raymond Lu
* 14 March 2013
*
*-------------------------------------------------------------------------
@@ -471,7 +471,7 @@ error:
static herr_t
test_read_with_filters(hid_t file)
{
- hid_t dset; /* Dataset ID */
+ hid_t dset; /* Dataset ID */
/*----------------------------------------------------------
* STEP 1: Test deflation by itself.
@@ -526,6 +526,77 @@ error:
}
/*-------------------------------------------------------------------------
+ * Function: test_noread_data
+ *
+ * Purpose:      Tests that reading the data fails when the required filter plugin is unavailable
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_noread_data(hid_t dataset)
+{
+ int check[DSET_DIM1][DSET_DIM2];
+ herr_t ret;
+
+ /* Read the dataset back */
+ H5E_BEGIN_TRY {
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, check);
+ } H5E_END_TRY
+ if(ret >= 0)
+ TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_noread_with_filters
+ *
+ * Purpose:      Tests reading a dataset created with a dynamically loaded filter while filter plugins are disabled
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_noread_with_filters(hid_t file)
+{
+ hid_t dset; /* Dataset ID */
+ unsigned plugin_state; /* status of plugins */
+ TESTING("Testing DYNLIB1 filter with plugins disabled");
+
+ /* disable filter plugin */
+ if(H5PLget_loading_state(&plugin_state) < 0) TEST_ERROR
+ plugin_state = plugin_state & ~H5PL_FILTER_PLUGIN;
+ if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
+
+ if((dset = H5Dopen2(file,DSET_DYNLIB1_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(test_noread_data(dset) < 0) TEST_ERROR
+
+ if(H5Dclose(dset) < 0) TEST_ERROR
+
+ /* re-enable filter plugin */
+ plugin_state = plugin_state | H5PL_FILTER_PLUGIN;
+ if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
+
+ return 0;
+
+error:
+ /* re-enable filter plugin */
+ plugin_state = plugin_state | H5PL_FILTER_PLUGIN;
+ if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
* Function: test_filters_for_groups
*
* Purpose: Tests creating group with dynamically loaded filters
@@ -713,6 +784,17 @@ main(void)
/* Open the groups with filters */
nerrors += (test_groups_with_filters(file) < 0 ? 1 : 0);
+ /* Close the library so that all loaded plugin libraries are unloaded */
+ h5_reset();
+ fapl = h5_fileaccess();
+
+ /* Reopen the file for testing data reading */
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ TEST_ERROR
+
+ /* Read the data with disabled filters */
+ nerrors += (test_noread_with_filters(file) < 0 ? 1 : 0);
+
if(H5Fclose(file) < 0)
TEST_ERROR
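The new plugin.c tests use the H5PLget_loading_state / H5PLset_loading_state pair to turn dynamic filter loading off and back on around a read that is expected to fail. Outside the test harness the same toggle looks roughly like this sketch (illustrative only, not from the patch):

    /* Illustrative only -- not from the patch. */
    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        unsigned state = 0;

        if (H5PLget_loading_state(&state) < 0)
            return 1;

        /* Clear the filter-plugin bit: dynamically loaded filters are skipped. */
        H5PLset_loading_state(state & (unsigned)~H5PL_FILTER_PLUGIN);
        printf("filter plugins disabled\n");

        /* ... operations that must not pull in plugin filters ... */

        /* Restore the bit so later reads can use the plugins again. */
        H5PLset_loading_state(state | H5PL_FILTER_PLUGIN);
        printf("filter plugins re-enabled\n");

        return 0;
    }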
diff --git a/test/set_extent.c b/test/set_extent.c
index aaa6516..f6c3766 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -130,10 +130,10 @@ int main( void )
if((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR
/* Set chunk cache so only part of the chunks can be cached on fapl */
- if(H5Pset_cache(fapl, 0, (size_t)8, 256 * sizeof(int), 0.75) < 0) TEST_ERROR
+ if(H5Pset_cache(fapl, 0, (size_t)8, 256 * sizeof(int), 0.75F) < 0) TEST_ERROR
/* Disable chunk caching on fapl2 */
- if(H5Pset_cache(fapl2, 0, (size_t)0, (size_t)0, 0.) < 0) TEST_ERROR
+ if(H5Pset_cache(fapl2, 0, (size_t)0, (size_t)0, 0.0F) < 0) TEST_ERROR
/* Set the "use the latest version of the format" bounds for creating objects in the file */
if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR
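The set_extent.c change only touches the literals passed to H5Pset_cache; the final argument (rdcc_w0, the chunk preemption policy) is a double in the range 0.0 to 1.0, so the 0.75F and 0.0F values are promoted to double at the call. For reference, a minimal sketch of the call with illustrative values (not from the patch):

    /* Illustrative only -- not from the patch. */
    #include "hdf5.h"

    int main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        /* 521 hash slots, 1 MiB raw-data chunk cache, and w0 = 0.75 so chunks
         * that have been fully read or written are preferred for eviction. */
        if (H5Pset_cache(fapl, 0, 521, 1024 * 1024, 0.75) < 0)
            return 1;

        H5Pclose(fapl);
        return 0;
    }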
diff --git a/test/tarray.c b/test/tarray.c
index 1a53336..0923625 100644
--- a/test/tarray.c
+++ b/test/tarray.c
@@ -541,7 +541,7 @@ test_array_compound_atomic(void)
for(i = 0; i < SPACE1_DIM1; i++)
for(j = 0; j < ARRAY1_DIM1; j++) {
wdata[i][j].i = i * 10 + j;
- wdata[i][j].f = (float)(i * 2.5 + j);
+ wdata[i][j].f = (float)(i * 2.5F + j);
} /* end for */
/* Create file */
@@ -686,7 +686,7 @@ test_array_compound_atomic(void)
continue;
} /* end if */
if(!FLT_ABS_EQUAL(wdata[i][j].f, rdata[i][j].f)) {
- TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f=%f, rdata[%d][%d].f=%f\n", (int)i, (int)j, wdata[i][j].f, (int)i, (int)j, rdata[i][j].f);
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f=%f, rdata[%d][%d].f=%f\n", (int)i, (int)j, (double)wdata[i][j].f, (int)i, (int)j, (double)rdata[i][j].f);
continue;
} /* end if */
} /* end for */
@@ -745,7 +745,7 @@ test_array_compound_array(void)
for(j=0; j<ARRAY1_DIM1; j++) {
wdata[i][j].i=i*10+j;
for(k=0; k<ARRAY1_DIM1; k++)
- wdata[i][j].f[k]=(float)(i*10+j*2.5+k);
+ wdata[i][j].f[k]=(float)(i * 10.0F + j * 2.5F + k);
} /* end for */
/* Create file */
@@ -929,7 +929,7 @@ test_array_compound_array(void)
} /* end if */
for(k=0; k<ARRAY1_DIM1; k++)
if(!FLT_ABS_EQUAL(wdata[i][j].f[k],rdata[i][j].f[k])) {
- TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f[%d]=%f, rdata[%d][%d].f[%d]=%f\n",(int)i,(int)j,(int)k,wdata[i][j].f[k],(int)i,(int)j,(int)k,rdata[i][j].f[k]);
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f[%d]=%f, rdata[%d][%d].f[%d]=%f\n",(int)i,(int)j,(int)k,(double)wdata[i][j].f[k],(int)i,(int)j,(int)k,(double)rdata[i][j].f[k]);
continue;
} /* end if */
} /* end for */
@@ -1534,8 +1534,8 @@ test_array_bkg(void)
for (j = 0; j < ALEN; j++)
{
cf[i].a[j] = 100*(i+1) + j;
- cf[i].b[j] = (float)(100.*(i+1) + 0.01*j);
- cf[i].c[j] = 100.*(i+1) + 0.02*j;
+ cf[i].b[j] = (float)(100.0F*(i+1) + 0.01F*j);
+ cf[i].c[j] = (double)(100.0F*(i+1) + 0.02F*j);
}
}
@@ -1619,11 +1619,11 @@ test_array_bkg(void)
continue;
}
if(!FLT_ABS_EQUAL(cf[i].b[j],cfr[i].b[j])) {
- TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].b[j],(int)i,(int)j,(float)cfr[i].b[j]);
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].b[j],(int)i,(int)j,(double)cfr[i].b[j]);
continue;
}
if(!DBL_ABS_EQUAL(cf[i].c[j],cfr[i].c[j])) {
- TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].c[j],(int)i,(int)j,(float)cfr[i].c[j]);
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].c[j],(int)i,(int)j,(double)cfr[i].c[j]);
continue;
}
}
@@ -1674,7 +1674,7 @@ test_array_bkg(void)
/* -------------------------------- */
for (i=0; i< LENGTH; i++)
for (j = 0; j < ALEN; j++)
- cf[i].b[j]=fld[i].b[j] = (float)1.313;
+ cf[i].b[j]=fld[i].b[j] = 1.313F;
status = H5Dwrite (dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fld);
CHECK(status, FAIL, "H5Dwrite");
@@ -1686,7 +1686,7 @@ test_array_bkg(void)
for (i=0; i< LENGTH; i++)
for (j = 0; j < ALEN; j++)
if(!FLT_ABS_EQUAL(fld[i].b[j],fldr[i].b[j])) {
- TestErrPrintf("Field data doesn't match, fld[%d].b[%d]=%f, fldr[%d].b[%d]=%f\n",(int)i,(int)j,(float)fld[i].b[j],(int)i,(int)j,(float)fldr[i].b[j]);
+ TestErrPrintf("Field data doesn't match, fld[%d].b[%d]=%f, fldr[%d].b[%d]=%f\n",(int)i,(int)j,(double)fld[i].b[j],(int)i,(int)j,(double)fldr[i].b[j]);
continue;
}
@@ -1712,11 +1712,11 @@ test_array_bkg(void)
continue;
}
if(!FLT_ABS_EQUAL(cf[i].b[j],cfr[i].b[j])) {
- TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].b[j],(int)i,(int)j,(float)cfr[i].b[j]);
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].b[j],(int)i,(int)j,(double)cfr[i].b[j]);
continue;
}
if(!DBL_ABS_EQUAL(cf[i].c[j],cfr[i].c[j])) {
- TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].c[j],(int)i,(int)j,(float)cfr[i].c[j]);
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].c[j],(int)i,(int)j,(double)cfr[i].c[j]);
continue;
}
}
@@ -1761,11 +1761,11 @@ test_array_bkg(void)
continue;
}
if(!FLT_ABS_EQUAL(cf[i].b[j],cfr[i].b[j])) {
- TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].b[j],(int)i,(int)j,(float)cfr[i].b[j]);
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].b[j],(int)i,(int)j,(double)cfr[i].b[j]);
continue;
}
if(!DBL_ABS_EQUAL(cf[i].c[j],cfr[i].c[j])) {
- TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(float)cf[i].c[j],(int)i,(int)j,(float)cfr[i].c[j]);
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n",(int)i,(int)j,(double)cf[i].c[j],(int)i,(int)j,(double)cfr[i].c[j]);
continue;
}
}
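
The changes in this file all follow the same two-part pattern that recurs through the rest of the patch: constants assigned to float variables get an F suffix so the arithmetic stays in single precision, and float arguments to printf-style functions are cast to double, since default argument promotion turns them into doubles anyway and the explicit cast silences warnings such as -Wdouble-promotion. A standalone illustration (not part of the patch):

    #include <stdio.h>

    int
    main(void)
    {
        float f = 2.5F;                    /* 'F' keeps the constant single precision */

        f = f * 10.0F + 3.0F;              /* no intermediate double arithmetic */
        printf("f = %f\n", (double)f);     /* varargs promote float to double either
                                              way; the cast just makes it explicit */
        return 0;
    }
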
diff --git a/test/tattr.c b/test/tattr.c
index 2885124..31a3844 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -97,7 +97,7 @@ int attr_data2[ATTR2_DIM1][ATTR2_DIM2]={{7614,-416},{197814,-3}}; /* Test data f
#define ATTR3_DIM1 2
#define ATTR3_DIM2 2
#define ATTR3_DIM3 2
-double attr_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3]={{{2.3,-26.1},{0.123,-10.0}},{{973.23,-0.91827},{2.0,23.0}}}; /* Test data for 3rd attribute */
+double attr_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3]={{{2.3F,-26.1F}, {0.123F,-10.0F}},{{973.23F,-0.91827F},{2.0F,23.0F}}}; /* Test data for 3rd attribute */
#define ATTR4_NAME "Attr4"
#define ATTR4_RANK 2
@@ -113,12 +113,12 @@ struct attr4_struct {
int i;
double d;
char c;
- } attr_data4[ATTR4_DIM1][ATTR4_DIM2]={{{3,-26.1,'d'},{-100000, 0.123,'3'}},
- {{-23,981724.2,'Q'},{0,2.0,'\n'}}}; /* Test data for 4th attribute */
+ } attr_data4[ATTR4_DIM1][ATTR4_DIM2]={{{3,-26.1F,'d'},{-100000, 0.123F,'3'}},
+ {{-23,981724.2F,'Q'},{0,2.0F,'\n'}}}; /* Test data for 4th attribute */
#define ATTR5_NAME "Attr5"
#define ATTR5_RANK 0
-float attr_data5=(float)-5.123; /* Test data for 5th attribute */
+float attr_data5=-5.123F; /* Test data for 5th attribute */
#define ATTR6_RANK 3
#define ATTR6_DIM1 100
@@ -500,7 +500,7 @@ test_attr_flush(hid_t fapl)
att, /* Attribute ID */
spc, /* Dataspace ID */
set; /* Dataset ID */
- double wdata=3.14159; /* Data to write */
+ double wdata=3.14159F; /* Data to write */
double rdata; /* Data read in */
herr_t ret; /* Generic return value */
@@ -522,8 +522,8 @@ test_attr_flush(hid_t fapl)
ret=H5Aread(att, H5T_NATIVE_DOUBLE, &rdata);
CHECK(ret, FAIL, "H5Awrite");
- if(!DBL_ABS_EQUAL(rdata,0.0))
- TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n",rdata,0.0);
+ if(!DBL_ABS_EQUAL(rdata,0.0F))
+ TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n",rdata,(double)0.0F);
ret=H5Fflush(fil, H5F_SCOPE_GLOBAL);
CHECK(ret, FAIL, "H5Fflush");
@@ -531,8 +531,8 @@ test_attr_flush(hid_t fapl)
ret=H5Aread(att, H5T_NATIVE_DOUBLE, &rdata);
CHECK(ret, FAIL, "H5Awrite");
- if(!DBL_ABS_EQUAL(rdata,0.0))
- TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n",rdata,0.0);
+ if(!DBL_ABS_EQUAL(rdata,0.0F))
+ TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n",rdata,(double)0.0F);
ret=H5Awrite(att, H5T_NATIVE_DOUBLE, &wdata);
CHECK(ret, FAIL, "H5Awrite");
@@ -985,7 +985,7 @@ test_attr_scalar_read(hid_t fapl)
hid_t sid; /* Dataspace ID */
hid_t attr; /* Attribute ID */
H5S_class_t stype; /* Dataspace class */
- float rdata = 0.0; /* Buffer for reading 1st attribute */
+ float rdata = 0.0F; /* Buffer for reading 1st attribute */
H5O_info_t oinfo; /* Object info */
herr_t ret; /* Generic return value */
@@ -1016,7 +1016,7 @@ test_attr_scalar_read(hid_t fapl)
    /* Verify the floating-point value in this way to avoid compiler warning. */
if(!FLT_ABS_EQUAL(rdata, attr_data5))
printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n",
- "H5Aread", attr_data5, rdata, (int)__LINE__, __FILE__);
+ "H5Aread", (double)attr_data5, (double)rdata, (int)__LINE__, __FILE__);
/* Get the attribute's dataspace */
sid = H5Aget_space(attr);
diff --git a/test/testhdf5.h b/test/testhdf5.h
index 907fce9..62dadde 100644
--- a/test/testhdf5.h
+++ b/test/testhdf5.h
@@ -68,6 +68,18 @@
} \
}
+#define CHECK_PTR_NULL(ret,where) { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned %p\n", \
+ (where), (int)__LINE__, __FILE__, (ret)); \
+ } \
+ if (ret) { \
+ TestErrPrintf ("*** UNEXPECTED RETURN from %s is not NULL line %4d in %s\n", \
+ (where), (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+}
+
/* Used to make certain a return value _is_ a value */
#define VERIFY(_x, _val, where) do { \
long __x = (long)_x, __val = (long)_val; \
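
CHECK_PTR_NULL is the counterpart of the existing CHECK_PTR macro: it reports an error when a call that should return NULL hands back a non-NULL pointer. A usage sketch mirroring how the tmisc.c additions below employ it (the buffer name is arbitrary):

    void *buf;

    /* A zero-byte request is expected to return NULL */
    buf = H5allocate_memory((size_t)0, FALSE);
    CHECK_PTR_NULL(buf, "H5allocate_memory");
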
diff --git a/test/tfile.c b/test/tfile.c
index 5de035a..52c8fd9 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -2362,7 +2362,7 @@ test_rw_noupdate(void)
diff = HDdifftime(sb2.st_mtime, sb1.st_mtime);
/* Check That Timestamps Are Equal */
- if(diff > 0.0) {
+ if(diff > 0.0F) {
/* Output message about test being performed */
MESSAGE(1, ("Testing to verify that nothing is written if nothing is changed: This test is skipped on this system because the modification time from stat is the same as the last access time (We know OpenVMS behaves in this way).\n"));
} /* end if */
@@ -2395,7 +2395,7 @@ test_rw_noupdate(void)
/* Ensure That Timestamps Are Equal */
diff = HDdifftime(sb2.st_mtime, sb1.st_mtime);
- ret = (diff > 0.0);
+ ret = (diff > 0.0F);
VERIFY(ret, 0, "Timestamp");
} /* end else */
} /* end test_rw_noupdate() */
diff --git a/test/tgenprop.c b/test/tgenprop.c
index 004e346..a3f1752 100644
--- a/test/tgenprop.c
+++ b/test/tgenprop.c
@@ -47,7 +47,7 @@ int prop1_def=10; /* Property 1 default value */
#define PROP1_DEF_VALUE (&prop1_def)
#define PROP2_NAME "Property 2"
-float prop2_def=(float)3.14; /* Property 2 default value */
+float prop2_def=3.14F; /* Property 2 default value */
#define PROP2_SIZE sizeof(prop2_def)
#define PROP2_DEF_VALUE (&prop2_def)
@@ -57,7 +57,7 @@ char prop3_def[10]="Ten chars"; /* Property 3 default value */
#define PROP3_DEF_VALUE (&prop3_def)
#define PROP4_NAME "Property 4"
-double prop4_def=1.41; /* Property 4 default value */
+double prop4_def=1.41F; /* Property 4 default value */
#define PROP4_SIZE sizeof(prop4_def)
#define PROP4_DEF_VALUE (&prop4_def)
@@ -683,7 +683,7 @@ test_genprop_basic_list(void)
    /* Verify the floating-point value in this way to avoid compiler warning. */
if(!FLT_ABS_EQUAL(prop2_value,*PROP2_DEF_VALUE))
printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n",
- "H5Pget", *PROP2_DEF_VALUE, prop2_value, (int)__LINE__, __FILE__);
+ "H5Pget", (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
/* Close list */
@@ -775,7 +775,7 @@ test_genprop_basic_list_prop(void)
    /* Verify the floating-point value in this way to avoid compiler warning. */
if(!FLT_ABS_EQUAL(prop2_value,*PROP2_DEF_VALUE))
printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n",
- "H5Pget", *PROP2_DEF_VALUE, prop2_value, (int)__LINE__, __FILE__);
+ "H5Pget", (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
/* Check values of temporary properties (set with regular values) */
@@ -1239,7 +1239,7 @@ test_genprop_list_callback(void)
    /* Verify the floating-point value in this way to avoid compiler warning. */
if(!FLT_ABS_EQUAL(prop2_value,*PROP2_DEF_VALUE))
printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n",
- "H5Pget", *PROP2_DEF_VALUE, prop2_value, (int)__LINE__, __FILE__);
+ "H5Pget", (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
/* Check values of temporary properties (set with regular values) */
ret = H5Pget(lid1, PROP3_NAME,&prop3_value);
diff --git a/test/th5s.c b/test/th5s.c
index d3a651c..7c1c46e 100644
--- a/test/th5s.c
+++ b/test/th5s.c
@@ -79,7 +79,7 @@ struct space4_struct {
unsigned u;
float f;
char c2;
- } space4_data={'v',987123,(float)-3.14,'g'}; /* Test data for 4th dataspace */
+ } space4_data={'v',987123,-3.14F,'g'}; /* Test data for 4th dataspace */
/****************************************************************
**
@@ -1581,7 +1581,7 @@ test_h5s_compound_scalar_read(void)
if(HDmemcmp(&space4_data,&rdata,sizeof(struct space4_struct))) {
printf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n",space4_data.c1,rdata.c1);
printf("scalar data different: space4_data.u=%u, read_data4.u=%u\n",space4_data.u,rdata.u);
- printf("scalar data different: space4_data.f=%f, read_data4.f=%f\n",space4_data.f,rdata.f);
+ printf("scalar data different: space4_data.f=%f, read_data4.f=%f\n",(double)space4_data.f,(double)rdata.f);
TestErrPrintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n",space4_data.c1,rdata.c2);
} /* end if */
@@ -1647,7 +1647,7 @@ test_h5s_chunk(void)
/* Initialize float array */
for(i = 0; i < 50000; i++)
for(j = 0; j < 3; j++)
- chunk_data_flt[i][j] = (float)((i + 1) * 2.5 - j * 100.3);
+ chunk_data_flt[i][j] = (float)((i + 1) * 2.5F - j * 100.3F);
status = H5Dwrite(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt);
CHECK(status, FAIL, "H5Dwrite");
@@ -1683,8 +1683,8 @@ test_h5s_chunk(void)
for(i=0; i<50000; i++) {
for(j=0; j<3; j++) {
/* Check if the two values are within 0.001% range. */
- if(!DBL_REL_EQUAL(chunk_data_dbl[i][j], chunk_data_flt[i][j], 0.00001))
- TestErrPrintf("%u: chunk_data_dbl[%d][%d]=%e, chunk_data_flt[%d][%d]=%e\n", (unsigned)__LINE__, i, j, chunk_data_dbl[i][j], i, j, chunk_data_flt[i][j]);
+ if(!DBL_REL_EQUAL(chunk_data_dbl[i][j], chunk_data_flt[i][j], 0.00001F))
+ TestErrPrintf("%u: chunk_data_dbl[%d][%d]=%e, chunk_data_flt[%d][%d]=%e\n", (unsigned)__LINE__, i, j, chunk_data_dbl[i][j], i, j, (double)chunk_data_flt[i][j]);
} /* end for */
} /* end for */
} /* test_h5s_chunk() */
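
The tolerance check relies on the test harness's DBL_REL_EQUAL macro, which accepts two values if they agree to within a relative (not absolute) tolerance, here 0.001%. The exact definition lives in the test headers; its general shape is roughly the following sketch, which may differ in detail from the real macro:

    #include <math.h>

    #define REL_EQUAL(a, b, eps) \
        (fabs((double)(a) - (double)(b)) <= fabs((double)(a)) * (double)(eps))
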
@@ -2224,6 +2224,8 @@ test_h5s_extent_copy(void)
hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */
d3_dims2[3] = {20, 20, 20},
d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t npoints[14]; /* Expected number of points in selection for each element in spaces */
+ hssize_t npoints_ret; /* Number of points returned by H5Sget_select_npoints() */
htri_t ext_equal; /* Whether two dataspace extents are equal */
const unsigned num_spaces = sizeof(spaces) / sizeof(spaces[0]);
unsigned i, j;
@@ -2232,36 +2234,50 @@ test_h5s_extent_copy(void)
/* Create dataspaces */
spaces[0] = H5Screate(H5S_NULL);
CHECK(spaces[0], FAIL, "H5Screate");
+ npoints[0] = (hsize_t)0;
spaces[1] = H5Screate(H5S_SCALAR);
CHECK(spaces[1], FAIL, "H5Screate");
+ npoints[1] = (hsize_t)1;
spaces[2] = H5Screate_simple(1, d1_dims1, NULL);
CHECK(spaces[2], FAIL, "H5Screate");
+ npoints[2] = d1_dims1[0];
spaces[3] = H5Screate_simple(1, d1_dims2, NULL);
CHECK(spaces[3], FAIL, "H5Screate");
+ npoints[3] = d1_dims2[0];
spaces[4] = H5Screate_simple(1, d1_dims1, d1_dims2);
CHECK(spaces[4], FAIL, "H5Screate");
+ npoints[4] = d1_dims1[0];
spaces[5] = H5Screate_simple(1, d1_dims1, d1_dims3);
CHECK(spaces[5], FAIL, "H5Screate");
+ npoints[5] = d1_dims1[0];
spaces[6] = H5Screate_simple(2, d2_dims1, NULL);
CHECK(spaces[6], FAIL, "H5Screate");
+ npoints[6] = d2_dims1[0] * d2_dims1[1];
spaces[7] = H5Screate_simple(2, d2_dims2, NULL);
CHECK(spaces[7], FAIL, "H5Screate");
+ npoints[7] = d2_dims2[0] * d2_dims2[1];
spaces[8] = H5Screate_simple(2, d2_dims1, d2_dims2);
CHECK(spaces[8], FAIL, "H5Screate");
+ npoints[8] = d2_dims1[0] * d2_dims1[1];
spaces[9] = H5Screate_simple(2, d2_dims1, d2_dims3);
CHECK(spaces[9], FAIL, "H5Screate");
+ npoints[9] = d2_dims1[0] * d2_dims1[1];
spaces[10] = H5Screate_simple(3, d3_dims1, NULL);
CHECK(spaces[10], FAIL, "H5Screate");
+ npoints[10] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
spaces[11] = H5Screate_simple(3, d3_dims2, NULL);
CHECK(spaces[11], FAIL, "H5Screate");
+ npoints[11] = d3_dims2[0] * d3_dims2[1] * d3_dims2[2];
spaces[12] = H5Screate_simple(3, d3_dims1, d3_dims2);
CHECK(spaces[12], FAIL, "H5Screate");
+ npoints[12] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
spaces[13] = H5Screate_simple(3, d3_dims1, d3_dims3);
CHECK(spaces[13], FAIL, "H5Screate");
+ npoints[13] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
tmp_space = H5Screate(H5S_NULL);
CHECK(tmp_space, FAIL, "H5Screate");
@@ -2275,14 +2291,26 @@ test_h5s_extent_copy(void)
* will test copying from i/j to i/j */
ret = H5Sextent_copy(tmp_space, spaces[j]);
CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Verify that the extents are equal */
ext_equal = H5Sextent_equal(tmp_space, spaces[j]);
VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ /* Verify that the correct number of elements is selected */
+ npoints_ret = H5Sget_select_npoints(tmp_space);
+ VERIFY((hsize_t)npoints_ret, npoints[j], "H5Sget_select_npoints");
+
/* Copy from j to i */
ret = H5Sextent_copy(tmp_space, spaces[i]);
CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Verify that the extents are equal */
ext_equal = H5Sextent_equal(tmp_space, spaces[i]);
VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+
+ /* Verify that the correct number of elements is selected */
+ npoints_ret = H5Sget_select_npoints(tmp_space);
+ VERIFY((hsize_t)npoints_ret, npoints[i], "H5Sget_select_npoints");
} /* end for */
/* Close dataspaces */
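
The new npoints[] bookkeeping captures the expectation the loop verifies: after H5Sextent_copy() the destination dataspace carries an "all" selection over the copied extent, so H5Sget_select_npoints() should report the full element count (0 for a null space, 1 for a scalar). A minimal sketch of that single check, with illustrative dimensions:

    hsize_t dims[2] = {10, 10};
    hid_t   src = H5Screate_simple(2, dims, NULL);
    hid_t   dst = H5Screate(H5S_NULL);

    if(H5Sextent_copy(dst, src) < 0)
        TEST_ERROR
    /* The selection follows the extent: every element of the new extent is selected */
    if(H5Sget_select_npoints(dst) != (hssize_t)(dims[0] * dims[1]))
        TEST_ERROR
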
diff --git a/test/tmisc.c b/test/tmisc.c
index cac6b04..869557c 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -1847,9 +1847,21 @@ test_misc11(void)
ret = H5Pset_sizes(fcpl, (size_t)MISC11_SIZEOF_OFF, (size_t)MISC11_SIZEOF_LEN);
CHECK(ret, FAIL, "H5Pset_sizes");
+    /* This should fail as (32770*2) will exceed 2^16 - 2, the limit of the 2-byte field used to store btree entries */
+ H5E_BEGIN_TRY {
+ ret=H5Pset_sym_k(fcpl, 32770, 0);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_sym_k");
+
ret=H5Pset_sym_k(fcpl,MISC11_SYM_IK,MISC11_SYM_LK);
CHECK(ret, FAIL, "H5Pset_sym_k");
+    /* This should fail as (32770*2) will exceed 2^16 - 2, the limit of the 2-byte field used to store btree entries */
+ H5E_BEGIN_TRY {
+ ret=H5Pset_istore_k(fcpl, 32770);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_istore_k");
+
ret=H5Pset_istore_k(fcpl,MISC11_ISTORE_IK);
CHECK(ret, FAIL, "H5Pset_istore_k");
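
Both "k" values are stored in 2-byte superblock fields, which is why the new negative tests expect H5Pset_sym_k() and H5Pset_istore_k() to reject 32770: twice that value no longer fits in 16 bits. Reading the settings back is the usual way to confirm what was accepted; a short sketch, reusing the fcpl from the surrounding test:

    unsigned ik = 0, lk = 0, istore_ik = 0;

    if(H5Pget_sym_k(fcpl, &ik, &lk) < 0)
        TEST_ERROR
    if(H5Pget_istore_k(fcpl, &istore_ik) < 0)
        TEST_ERROR
    /* 2 * ik and 2 * istore_ik must both fit in the 16-bit on-disk fields */
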
@@ -2427,17 +2439,17 @@ test_misc13(void)
static void
test_misc14(void)
{
- hid_t file_id; /* File ID */
- hid_t fapl; /* File access property list ID */
- hid_t DataSpace; /* Dataspace ID */
- hid_t Dataset1; /* Dataset ID #1 */
- hid_t Dataset2; /* Dataset ID #2 */
- hid_t Dataset3; /* Dataset ID #3 */
- double data1 = 5.0; /* Data to write for dataset #1 */
- double data2 = 10.0; /* Data to write for dataset #2 */
- double data3 = 15.0; /* Data to write for dataset #3 */
- double rdata; /* Data read in */
- herr_t ret; /* Generic return value */
+ hid_t file_id; /* File ID */
+ hid_t fapl; /* File access property list ID */
+ hid_t DataSpace; /* Dataspace ID */
+ hid_t Dataset1; /* Dataset ID #1 */
+ hid_t Dataset2; /* Dataset ID #2 */
+ hid_t Dataset3; /* Dataset ID #3 */
+ double data1 = 5.0F; /* Data to write for dataset #1 */
+ double data2 = 10.0F; /* Data to write for dataset #2 */
+ double data3 = 15.0F; /* Data to write for dataset #3 */
+ double rdata; /* Data read in */
+ herr_t ret; /* Generic return value */
/* Test creating two datasets and deleting the second */
@@ -4949,7 +4961,7 @@ test_misc28(void)
* bytes). */
fapl = H5Pcreate(H5P_FILE_ACCESS);
CHECK(fapl, FAIL, "H5Pcreate");
- ret = H5Pset_cache(fapl, MISC28_NSLOTS, MISC28_NSLOTS, MISC28_SIZE, 0.75);
+ ret = H5Pset_cache(fapl, MISC28_NSLOTS, MISC28_NSLOTS, MISC28_SIZE, 0.75F);
CHECK(ret, FAIL, "H5Pset_cache");
/* Create the dcpl and set the chunk size */
@@ -5304,6 +5316,73 @@ test_misc31(void)
/****************************************************************
+ *
+ * test_misc32(): Simple test of filter memory allocation
+ * functions.
+ *
+ ***************************************************************/
+static void
+test_misc32(void)
+{
+ void *buffer;
+ void *resized;
+ size_t size;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Edge case test of filter memory allocation functions\n"));
+
+ /* Test that the filter memory allocation functions behave correctly
+ * at edge cases.
+ */
+
+ /* FREE */
+
+ /* Test freeing a NULL pointer.
+ * No real confirmation check here, but Valgrind will confirm no
+ * shenanigans.
+ */
+ buffer = NULL;
+ H5free_memory(buffer);
+
+ /* ALLOCATE */
+
+ /* Size zero returns NULL.
+ * Also checks that a size of zero and setting the buffer clear flag
+ * to TRUE can be used together.
+ *
+ * Note that we have asserts in the code, so only check when NDEBUG
+ * is defined.
+ */
+#ifdef NDEBUG
+ buffer = H5allocate_memory(0, FALSE);
+ CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/
+ buffer = H5allocate_memory(0, TRUE);
+ CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/
+#endif /* NDEBUG */
+
+ /* RESIZE */
+
+ /* Size zero returns NULL. Valgrind will confirm buffer is freed. */
+ size = 1024;
+ buffer = H5allocate_memory(size, TRUE);
+ resized = H5resize_memory(buffer, 0);
+ CHECK_PTR_NULL(resized, "H5resize_memory");
+
+ /* NULL input pointer returns new buffer */
+ resized = H5resize_memory(NULL, 1024);
+ CHECK_PTR(resized, "H5resize_memory");
+ H5free_memory(resized);
+
+ /* NULL input pointer and size zero returns NULL */
+#ifdef NDEBUG
+ resized = H5resize_memory(NULL, 0);
+ CHECK_PTR_NULL(resized, "H5resize_memory"); /*BAD*/
+#endif /* NDEBUG */
+
+} /* end test_misc32() */
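
H5allocate_memory(), H5resize_memory() and H5free_memory() exist so that code outside the library, dynamically loaded filters in particular, can exchange buffers with HDF5 using the library's own allocator. A minimal usage sketch under that assumption (the sizes are arbitrary):

    size_t  size = 1024;
    void   *buf;

    buf = H5allocate_memory(size, TRUE);     /* TRUE requests zero-initialized memory */
    if(NULL == buf)
        TEST_ERROR

    buf = H5resize_memory(buf, 2 * size);    /* grows the buffer, like realloc() */
    if(NULL == buf)
        TEST_ERROR

    if(H5free_memory(buf) < 0)               /* release with the matching free routine */
        TEST_ERROR
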
+
+
+/****************************************************************
**
** test_misc(): Main misc. test routine.
**
@@ -5349,6 +5428,7 @@ test_misc(void)
test_misc29(); /* Test that speculative metadata reads are handled correctly */
test_misc30(); /* Exercise local heap loading bug where free lists were getting dropped */
test_misc31(); /* Test Reentering library through deprecated routines after H5close() */
+ test_misc32(); /* Test filter memory allocation functions */
} /* test_misc() */
diff --git a/test/trefer.c b/test/trefer.c
index 9cb7f26..6d72aee 100644
--- a/test/trefer.c
+++ b/test/trefer.c
@@ -522,6 +522,12 @@ test_reference_region(void)
H5O_type_t obj_type; /* Type of object */
int i, j; /* counting variables */
herr_t ret; /* Generic return value */
+ haddr_t addr = HADDR_UNDEF; /* test for undefined reference */
+ hid_t dset_NA; /* Dataset id for undefined reference */
+ hid_t space_NA; /* Dataspace id for undefined reference */
+ hsize_t dims_NA[1] = {1}; /* Dims array for undefined reference */
+ hdset_reg_ref_t wdata_NA[1], /* Write buffer */
+ rdata_NA[1]; /* Read buffer */
/* Output message about test being performed */
MESSAGE(5, ("Testing Dataset Region Reference Functions\n"));
@@ -612,6 +618,31 @@ test_reference_region(void)
ret = H5Dwrite(dset1, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
CHECK(ret, FAIL, "H5Dwrite");
+ /*
+ * Store a dataset region reference which will not get written to disk
+ */
+
+ /* Create reference to an element in dset1 */
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ ret = H5Rcreate(&wdata_NA[0], fid1, "/Dataset1", H5R_DATASET_REGION, sid2);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Create the dataspace of the region references */
+ space_NA = H5Screate_simple(1, dims_NA, NULL);
+ CHECK(space_NA, FAIL, "H5Screate_simple");
+
+ /* Create the dataset and write the region references to it */
+ dset_NA = H5Dcreate2(fid1, "DS_NA", H5T_STD_REF_DSETREG, space_NA, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_NA, FAIL, "H5Dcreate");
+
+ /* Close and release resources for undefined region reference tests */
+ ret = H5Dclose(dset_NA);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(space_NA);
+ CHECK(ret, FAIL, "H5Sclose");
+
/* Close disk dataspace */
ret = H5Sclose(sid1);
CHECK(ret, FAIL, "H5Sclose");
@@ -632,6 +663,41 @@ test_reference_region(void)
fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
CHECK(fid1, FAIL, "H5Fopen");
+ /*
+ * Start the test of an undefined reference
+ */
+
+ /* Open the dataset of the undefined references */
+ dset_NA = H5Dopen2(fid1, "DS_NA", H5P_DEFAULT);
+ CHECK(dset_NA, FAIL, "H5Dopen2");
+
+ /* Read the data */
+ ret = H5Dread(dset_NA, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_NA);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /*
+ * Dereference an undefined reference (should fail)
+ */
+ H5E_BEGIN_TRY {
+ dset2 = H5Rdereference2(dset_NA, H5P_DEFAULT, H5R_DATASET_REGION, &rdata_NA[0]);
+ } H5E_END_TRY;
+ VERIFY(dset2, FAIL, "H5Rdereference2");
+
+ /* Close and release resources. */
+ ret = H5Dclose(dset_NA);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* This close should fail since H5Rdereference2 never created
+ * the id of the referenced object. */
+ H5E_BEGIN_TRY {
+ ret = H5Dclose(dset2);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dclose");
+
+ /*
+ * End the test of an undefined reference
+ */
+
/* Open the dataset */
dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT);
CHECK(dset1, FAIL, "H5Dopen2");
@@ -640,6 +706,10 @@ test_reference_region(void)
ret = H5Dread(dset1, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
CHECK(ret, FAIL, "H5Dread");
+ /* Try to read an unaddressed dataset */
+ dset2 = H5Rdereference2(dset1, dapl_id, H5R_DATASET_REGION, &addr);
+ VERIFY(dset2, FAIL, "H5Rdereference2 haddr_undef");
+
/* Try to open objects */
dset2 = H5Rdereference2(dset1, dapl_id, H5R_DATASET_REGION, &rbuf[0]);
CHECK(dset2, FAIL, "H5Rdereference2");
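
The additions pin down the expected failure mode: dereferencing a region reference that was never written (the all-zero rdata_NA case) or an undefined address (the HADDR_UNDEF case) must return a negative ID rather than crash or yield a bogus object. The caller-side pattern being verified is roughly the following sketch, with dset_NA as in the test above:

    hdset_reg_ref_t ref;
    hid_t           obj;

    HDmemset(&ref, 0, sizeof(ref));      /* a reference that was never written */
    H5E_BEGIN_TRY {
        obj = H5Rdereference2(dset_NA, H5P_DEFAULT, H5R_DATASET_REGION, &ref);
    } H5E_END_TRY;
    if(obj >= 0)
        TEST_ERROR                       /* must fail; the expected error is suppressed */
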
@@ -1070,6 +1140,7 @@ test_reference_obj_deleted(void)
hid_t sid1; /* Dataspace ID */
hobj_ref_t oref; /* Object Reference to test */
H5O_type_t obj_type; /* Object type */
+ haddr_t addr = HADDR_UNDEF; /* test for undefined reference */
herr_t ret; /* Generic return value */
/* Create file */
@@ -1127,6 +1198,10 @@ test_reference_obj_deleted(void)
dataset = H5Dopen2(fid1, "/Dataset2", H5P_DEFAULT);
CHECK(ret, FAIL, "H5Dopen2");
+ /* Open undefined reference */
+ dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &addr);
+ VERIFY(dset2, FAIL, "H5Rdereference2");
+
/* Read selection from disk */
HDmemset(&oref, 0, sizeof(hobj_ref_t));
ret = H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref);
diff --git a/test/tsohm.c b/test/tsohm.c
index 8ebfd52..9da655e 100644
--- a/test/tsohm.c
+++ b/test/tsohm.c
@@ -57,7 +57,7 @@ const unsigned test_minsizes[H5O_SHMESG_MAX_NINDEXES] = {0, 2, 40, 100, 3, 1000}
#define NAME_BUF_SIZE 512
/* How much overhead counts as "not much" when converting B-trees, etc. */
-#define OVERHEAD_ALLOWED 1.15
+#define OVERHEAD_ALLOWED 1.15F
#define NUM_DATASETS 10
#define NUM_ATTRIBUTES 100
@@ -600,7 +600,7 @@ size1_helper(hid_t file, const char* filename, hid_t fapl_id, int test_file_clos
wdata.i6 = 66;
wdata.i7 = 77;
wdata.i8 = 88;
- wdata.f1 = 0.0;
+ wdata.f1 = 0.0F;
     /* Initialize rdata */
HDmemset(&rdata, 0, sizeof(rdata));
diff --git a/test/ttsafe_cancel.c b/test/ttsafe_cancel.c
index 56f431e..d8ed462 100644
--- a/test/ttsafe_cancel.c
+++ b/test/ttsafe_cancel.c
@@ -123,7 +123,7 @@ void tts_cancel(void)
void *tts_cancel_thread(void UNUSED *arg)
{
int datavalue;
- int *buffer;
+ int buffer;
hid_t dataspace, datatype, dataset;
hsize_t dimsf[1]; /* dataset dimensions */
cancel_cleanup_t *cleanup_structure;
@@ -156,10 +156,9 @@ void *tts_cancel_thread(void UNUSED *arg)
ret=H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &datavalue);
assert(ret>=0);
- buffer = HDmalloc(sizeof(int));
- ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer);
+ ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buffer);
assert(ret>=0);
- ret=H5Diterate(buffer, H5T_NATIVE_INT, dataspace, tts_cancel_callback, &dataset);
+ ret=H5Diterate(&buffer, H5T_NATIVE_INT, dataspace, tts_cancel_callback, &dataset);
assert(ret>=0);
sleep(3);
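
The change replaces a one-int heap buffer with a stack variable; H5Dread() and H5Diterate() only need the address of the element to fill and walk. For reference, an H5Diterate() callback has the following shape (the callback name and body are hypothetical, and dataspace is as in the test):

    static herr_t
    print_int_element(void *elem, hid_t type_id, unsigned ndim,
                      const hsize_t *point, void *op_data)
    {
        /* type_id, ndim, point and op_data are unused in this sketch */
        printf("element value: %d\n", *(int *)elem);
        return 0;                        /* returning zero continues the iteration */
    }

    /* ... */
    int value = 42;
    H5Diterate(&value, H5T_NATIVE_INT, dataspace, print_int_element, NULL);
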
diff --git a/test/tunicode.c b/test/tunicode.c
index 7dc3391..82dc429 100644
--- a/test/tunicode.c
+++ b/test/tunicode.c
@@ -39,8 +39,8 @@
#define RANK 1
#define COMP_INT_VAL 7
-#define COMP_FLOAT_VAL -42.0
-#define COMP_DOUBLE_VAL 42.0
+#define COMP_FLOAT_VAL -42.0F
+#define COMP_DOUBLE_VAL 42.0F
/* Test function prototypes */
void test_fl_string(hid_t fid, const char *string);
diff --git a/test/tvltypes.c b/test/tvltypes.c
index 516974e..19e8252 100644
--- a/test/tvltypes.c
+++ b/test/tvltypes.c
@@ -731,7 +731,7 @@ test_vltypes_vlen_compound(void)
wdata[i].len=i+1;
for(j=0; j<(i+1); j++) {
((s1 *)wdata[i].p)[j].i=i*10+j;
- ((s1 *)wdata[i].p)[j].f=(float)((i*20+j)/3.0);
+ ((s1 *)wdata[i].p)[j].f=(float)((i*20+j)/3.0F);
} /* end for */
} /* end for */
@@ -877,7 +877,7 @@ rewrite_vltypes_vlen_compound(void)
wdata[i].len = i + increment;
for(j = 0; j < (i + increment); j++) {
((s1 *)wdata[i].p)[j].i = i * 40 + j;
- ((s1 *)wdata[i].p)[j].f = (float)((i * 60 + j) / 3.0);
+ ((s1 *)wdata[i].p)[j].f = (float)((i * 60 + j) / 3.0F);
} /* end for */
} /* end for */
@@ -1018,7 +1018,7 @@ test_vltypes_compound_vlen_vlen(void)
/* Allocate and initialize VL data to write */
for(i=0; i<SPACE3_DIM1; i++) {
wdata[i].i=i*10;
- wdata[i].f=(float)((i*20)/3.0);
+ wdata[i].f=(float)((i*20)/3.0F);
wdata[i].v.p=HDmalloc((i+L1_INCM)*sizeof(hvl_t));
wdata[i].v.len=i+L1_INCM;
for(t1=(wdata[i].v).p,j=0; j<(i+L1_INCM); j++, t1++) {
@@ -1479,7 +1479,7 @@ test_vltypes_compound_vlen_atomic(void)
/* Allocate and initialize VL data to write */
for(i=0; i<SPACE1_DIM1; i++) {
wdata[i].i=i*10;
- wdata[i].f=(float)((i*20)/3.0);
+ wdata[i].f=(float)((i*20)/3.0F);
wdata[i].v.p=HDmalloc((i+1)*sizeof(unsigned int));
wdata[i].v.len=i+1;
for(j=0; j<(i+1); j++)
@@ -1595,8 +1595,8 @@ test_vltypes_compound_vlen_atomic(void)
/* Check data read in */
for(i = 0; i < SPACE1_DIM1; i++)
- if(rdata[i].i != 0 || !FLT_ABS_EQUAL(rdata[i].f, 0.0) || rdata[i].v.len != 0 || rdata[i].v.p != NULL)
- TestErrPrintf("VL doesn't match!, rdata[%d].i=%d, rdata[%d].f=%f, rdata[%d].v.len=%u, rdata[%d].v.p=%p\n",(int)i,rdata[i].i,(int)i,rdata[i].f,(int)i,(unsigned)rdata[i].v.len,(int)i,rdata[i].v.p);
+ if(rdata[i].i != 0 || !FLT_ABS_EQUAL(rdata[i].f, 0.0F) || rdata[i].v.len != 0 || rdata[i].v.p != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d].i=%d, rdata[%d].f=%f, rdata[%d].v.len=%u, rdata[%d].v.p=%p\n",(int)i,rdata[i].i,(int)i,(double)rdata[i].f,(int)i,(unsigned)rdata[i].v.len,(int)i,rdata[i].v.p);
/* Write dataset to disk */
ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
@@ -1697,7 +1697,7 @@ rewrite_vltypes_compound_vlen_atomic(void)
/* Allocate and initialize VL data to write */
for(i = 0; i < SPACE1_DIM1; i++) {
wdata[i].i = i * 40;
- wdata[i].f = (float)((i * 50) / 3.0);
+ wdata[i].f = (float)((i * 50) / 3.0F);
wdata[i].v.p = HDmalloc((i + increment) * sizeof(unsigned int));
wdata[i].v.len = i + increment;
for(j = 0; j < (i + increment); j++)
@@ -2430,8 +2430,8 @@ test_vltypes_fill_value(void)
hsize_t small_dims[] = {SPACE4_DIM_SMALL};
hsize_t large_dims[] = {SPACE4_DIM_LARGE};
size_t dset_elmts; /* Number of elements in a particular dataset */
- const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead", 3, 4.0, 100.0, 1.0, "liquid", "meter"};
- const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0, 200.0, 2.0, "solid", "yard"};
+ const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead", 3, 4.0F, 100.0F, 1.0F, "liquid", "meter"};
+ const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0F, 200.0F, 2.0F, "solid", "yard"};
dtype1_struct *rbuf = NULL; /* Buffer for reading data */
size_t mem_used = 0; /* Memory used during allocation */
H5D_layout_t layout; /* Dataset storage layout */